2024-11-20 14:45:15,461 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-20 14:45:15,471 main DEBUG Took 0.008814 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 14:45:15,471 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 14:45:15,472 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 14:45:15,472 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 14:45:15,473 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,480 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 14:45:15,491 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,493 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,494 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,495 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,496 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,497 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,497 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,497 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 14:45:15,499 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,499 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,500 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,500 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,501 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,501 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,502 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,502 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,503 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,503 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 14:45:15,504 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,504 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 14:45:15,506 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 14:45:15,508 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 14:45:15,511 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 14:45:15,511 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 14:45:15,513 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 14:45:15,514 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 14:45:15,524 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 14:45:15,526 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 14:45:15,528 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 14:45:15,528 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 14:45:15,529 main DEBUG createAppenders(={Console}) 2024-11-20 14:45:15,530 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-20 14:45:15,530 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-20 14:45:15,530 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-20 14:45:15,531 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 14:45:15,531 main DEBUG OutputStream closed 2024-11-20 14:45:15,532 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 14:45:15,532 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 14:45:15,532 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-20 14:45:15,618 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 14:45:15,621 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 14:45:15,622 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 14:45:15,624 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 14:45:15,625 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 14:45:15,625 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 14:45:15,626 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 14:45:15,626 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 14:45:15,627 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 14:45:15,627 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 14:45:15,628 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 14:45:15,628 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 14:45:15,629 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 14:45:15,629 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 14:45:15,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 14:45:15,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 14:45:15,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 14:45:15,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 14:45:15,635 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 14:45:15,635 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-20 14:45:15,640 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 14:45:15,641 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-20T14:45:15,907 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418 2024-11-20 14:45:15,910 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 14:45:15,910 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
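The Log4j startup lines above show a PropertiesConfiguration being built from the log4j2.properties bundled in the hbase-logging tests jar: per-package loggers (for example org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR), a PatternLayout using the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, and a single "Console" appender (HBaseTestAppender writing to SYSTEM_ERR) referenced from an INFO root logger. Purely as an illustration of that shape, here is a minimal, hypothetical sketch of an equivalent configuration built programmatically with the stock Log4j 2 ConfigurationBuilder API; it substitutes a plain Console appender for HBaseTestAppender and only reproduces a few of the logger levels listed above (class name Log4jConfigSketch is invented for the example).

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public final class Log4jConfigSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> b = ConfigurationBuilderFactory.newConfigurationBuilder();
    // Stand-in for HBaseTestAppender: a stock Console appender on stderr with the same pattern.
    b.add(b.newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
        .add(b.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));
    // A few of the per-package levels reported by the LoggerConfig builders above.
    b.add(b.newLogger("org.apache.hadoop", Level.WARN));
    b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));
    // Root logger at INFO routed to the Console appender ("levelAndRefs=INFO,Console" above).
    b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));
    Configurator.initialize(b.build());
  }
}
```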
2024-11-20T14:45:15,918 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-20T14:45:15,949 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=380, ProcessCount=11, AvailableMemoryMB=10862 2024-11-20T14:45:15,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T14:45:15,971 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316, deleteOnExit=true 2024-11-20T14:45:15,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T14:45:15,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/test.cache.data in system properties and HBase conf 2024-11-20T14:45:15,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T14:45:15,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir in system properties and HBase conf 2024-11-20T14:45:15,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T14:45:15,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T14:45:15,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T14:45:16,067 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T14:45:16,156 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T14:45:16,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:45:16,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:45:16,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T14:45:16,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:45:16,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T14:45:16,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T14:45:16,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:45:16,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:45:16,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T14:45:16,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/nfs.dump.dir in system properties and HBase conf 2024-11-20T14:45:16,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/java.io.tmpdir in system properties and HBase conf 2024-11-20T14:45:16,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:45:16,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T14:45:16,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T14:45:16,619 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:45:17,151 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T14:45:17,224 INFO [Time-limited test {}] log.Log(170): Logging initialized @2422ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T14:45:17,304 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:45:17,370 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:45:17,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:45:17,389 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:45:17,390 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:45:17,401 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:45:17,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:45:17,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:45:17,579 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c1a236c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/java.io.tmpdir/jetty-localhost-38835-hadoop-hdfs-3_4_1-tests_jar-_-any-6572456863583525117/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:45:17,586 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:38835} 2024-11-20T14:45:17,586 INFO [Time-limited test {}] server.Server(415): Started @2785ms 2024-11-20T14:45:17,610 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:45:18,056 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:45:18,063 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:45:18,064 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:45:18,064 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:45:18,065 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:45:18,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74548cdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:45:18,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fc5598e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:45:18,165 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a47b0ed{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/java.io.tmpdir/jetty-localhost-38883-hadoop-hdfs-3_4_1-tests_jar-_-any-6258708672002867613/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:45:18,165 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58445dea{HTTP/1.1, (http/1.1)}{localhost:38883} 2024-11-20T14:45:18,166 INFO [Time-limited test {}] server.Server(415): Started @3365ms 2024-11-20T14:45:18,218 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:45:18,327 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:45:18,333 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:45:18,334 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:45:18,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:45:18,335 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:45:18,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6176039d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:45:18,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b18aeba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:45:18,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54b536b1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/java.io.tmpdir/jetty-localhost-43877-hadoop-hdfs-3_4_1-tests_jar-_-any-3600347614981391019/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:45:18,439 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59ce19fe{HTTP/1.1, (http/1.1)}{localhost:43877} 2024-11-20T14:45:18,440 INFO [Time-limited test {}] server.Server(415): Started @3639ms 2024-11-20T14:45:18,442 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
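At this point the test (regionserver.wal.TestLogRolling#testSlowSyncLogRolling) has asked HBaseTestingUtil to start a mini cluster with one master, one region server, two HDFS datanodes and one ZooKeeper server, and the lines above show the HDFS side coming up (the namenode plus two datanode Jetty servers). As a rough sketch of how a test drives this startup, assuming the builder-style StartMiniClusterOption API whose field names appear in the toString on the earlier HBaseTestingUtil(805) line (the class MiniClusterStartupSketch is invented for the example):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public final class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the StartMiniClusterOption printed above:
    // 1 master, 1 region server, 2 HDFS datanodes, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts mini DFS and mini ZK, then the HBase cluster
    try {
      // ... exercise the cluster through util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();    // tears everything down and removes the test data dirs
    }
  }
}
```

The remaining lines in this section are the cluster components coming up in that order: datanode storage directories and block reports, the MiniZooKeeperCluster, and finally the master and region server processes.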
2024-11-20T14:45:19,229 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data2/current/BP-1758166626-172.17.0.2-1732113916695/current, will proceed with Du for space computation calculation, 2024-11-20T14:45:19,229 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data4/current/BP-1758166626-172.17.0.2-1732113916695/current, will proceed with Du for space computation calculation, 2024-11-20T14:45:19,229 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data3/current/BP-1758166626-172.17.0.2-1732113916695/current, will proceed with Du for space computation calculation, 2024-11-20T14:45:19,229 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data1/current/BP-1758166626-172.17.0.2-1732113916695/current, will proceed with Du for space computation calculation, 2024-11-20T14:45:19,258 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:45:19,258 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:45:19,303 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x13bd59b47e7fceae with lease ID 0x197da5a227e9d1c2: Processing first storage report for DS-d901d07b-a071-473a-ad37-daf3a1d7bf83 from datanode DatanodeRegistration(127.0.0.1:44309, datanodeUuid=f0c5c159-a508-4a07-b95c-3b714d78dfae, infoPort=44933, infoSecurePort=0, ipcPort=35997, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695) 2024-11-20T14:45:19,304 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13bd59b47e7fceae with lease ID 0x197da5a227e9d1c2: from storage DS-d901d07b-a071-473a-ad37-daf3a1d7bf83 node DatanodeRegistration(127.0.0.1:44309, datanodeUuid=f0c5c159-a508-4a07-b95c-3b714d78dfae, infoPort=44933, infoSecurePort=0, ipcPort=35997, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T14:45:19,305 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb0751679cade07 with lease ID 0x197da5a227e9d1c3: Processing first storage report for DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec from datanode DatanodeRegistration(127.0.0.1:40157, datanodeUuid=9a5088b2-a1aa-47aa-9ccb-7b237a8c173d, infoPort=42161, infoSecurePort=0, ipcPort=35535, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695) 2024-11-20T14:45:19,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb0751679cade07 with lease ID 0x197da5a227e9d1c3: from storage DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec node DatanodeRegistration(127.0.0.1:40157, datanodeUuid=9a5088b2-a1aa-47aa-9ccb-7b237a8c173d, infoPort=42161, infoSecurePort=0, ipcPort=35535, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:45:19,305 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x13bd59b47e7fceae with lease ID 0x197da5a227e9d1c2: Processing first storage report for DS-f28056e9-6aae-4887-948c-1efdb947b206 from datanode DatanodeRegistration(127.0.0.1:44309, datanodeUuid=f0c5c159-a508-4a07-b95c-3b714d78dfae, infoPort=44933, infoSecurePort=0, ipcPort=35997, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695) 2024-11-20T14:45:19,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13bd59b47e7fceae with lease ID 0x197da5a227e9d1c2: from storage DS-f28056e9-6aae-4887-948c-1efdb947b206 node DatanodeRegistration(127.0.0.1:44309, datanodeUuid=f0c5c159-a508-4a07-b95c-3b714d78dfae, infoPort=44933, infoSecurePort=0, ipcPort=35997, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T14:45:19,306 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb0751679cade07 with lease ID 0x197da5a227e9d1c3: Processing first storage report for DS-2ad84331-a9e0-45d1-a24b-28466d70a968 from datanode DatanodeRegistration(127.0.0.1:40157, datanodeUuid=9a5088b2-a1aa-47aa-9ccb-7b237a8c173d, infoPort=42161, infoSecurePort=0, ipcPort=35535, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695) 2024-11-20T14:45:19,306 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xfb0751679cade07 with lease ID 0x197da5a227e9d1c3: from storage DS-2ad84331-a9e0-45d1-a24b-28466d70a968 node DatanodeRegistration(127.0.0.1:40157, datanodeUuid=9a5088b2-a1aa-47aa-9ccb-7b237a8c173d, infoPort=42161, infoSecurePort=0, ipcPort=35535, storageInfo=lv=-57;cid=testClusterID;nsid=355074844;c=1732113916695), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:45:19,375 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418 2024-11-20T14:45:19,435 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/zookeeper_0, clientPort=64781, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T14:45:19,444 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64781 2024-11-20T14:45:19,453 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:19,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:19,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:45:19,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:45:20,068 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe with version=8 2024-11-20T14:45:20,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase-staging 2024-11-20T14:45:20,150 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T14:45:20,402 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:45:20,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:45:20,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:45:20,416 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:45:20,417 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:45:20,417 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:45:20,547 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T14:45:20,602 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T14:45:20,611 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T14:45:20,615 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:45:20,639 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22736 (auto-detected) 2024-11-20T14:45:20,640 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T14:45:20,658 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38341 2024-11-20T14:45:20,679 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38341 connecting to ZooKeeper ensemble=127.0.0.1:64781 2024-11-20T14:45:20,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:383410x0, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:45:20,793 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38341-0x10159fe80800000 connected 2024-11-20T14:45:20,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:20,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:20,882 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:45:20,886 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe, hbase.cluster.distributed=false 2024-11-20T14:45:20,915 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:45:20,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38341 
2024-11-20T14:45:20,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38341 2024-11-20T14:45:20,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38341 2024-11-20T14:45:20,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38341 2024-11-20T14:45:20,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38341 2024-11-20T14:45:21,057 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:45:21,059 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:45:21,060 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:45:21,060 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:45:21,060 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:45:21,060 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:45:21,064 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:45:21,068 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:45:21,069 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38313 2024-11-20T14:45:21,072 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38313 connecting to ZooKeeper ensemble=127.0.0.1:64781 2024-11-20T14:45:21,073 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:21,080 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:21,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383130x0, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:45:21,118 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:383130x0, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:45:21,118 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:38313-0x10159fe80800001 connected 2024-11-20T14:45:21,123 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:45:21,132 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:45:21,135 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T14:45:21,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:45:21,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38313 2024-11-20T14:45:21,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38313 2024-11-20T14:45:21,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38313 2024-11-20T14:45:21,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38313 2024-11-20T14:45:21,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38313 2024-11-20T14:45:21,165 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a15ecfd95f4:38341 2024-11-20T14:45:21,167 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:21,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:45:21,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:45:21,187 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:21,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:21,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T14:45:21,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:21,211 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:45:21,212 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a15ecfd95f4,38341,1732113920243 from backup master directory 2024-11-20T14:45:21,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:21,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:45:21,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:45:21,219 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:45:21,220 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:21,222 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T14:45:21,223 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T14:45:21,289 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase.id] with ID: 761c1661-0705-48e2-8fc2-b307034352fe 2024-11-20T14:45:21,289 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/.tmp/hbase.id 2024-11-20T14:45:21,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:45:21,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:45:21,313 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/.tmp/hbase.id]:[hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase.id] 2024-11-20T14:45:21,363 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:21,367 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-20T14:45:21,388 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-20T14:45:21,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:21,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:21,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:45:21,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:45:21,449 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:45:21,452 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T14:45:21,460 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:45:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:45:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:45:21,513 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store 2024-11-20T14:45:21,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:45:21,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:45:21,538 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-20T14:45:21,542 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:45:21,543 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:45:21,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:45:21,543 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:45:21,545 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:45:21,545 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:45:21,545 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
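The descriptor printed above is the master's internal 'master:store' local region: an 'info' family kept in memory with three versions, ROWCOL bloom filters, ROW_INDEX_V1 data block encoding and 8 KB blocks, plus 'proc', 'rs' and 'state' families with one version, ROW bloom filters and 64 KB blocks. MasterRegion builds this descriptor internally, but purely as an illustration of the same shape, a hypothetical sketch with the public descriptor builders (not the code HBase actually runs) might look like:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        // 'info' family with the attributes shown in the descriptor above.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        // 'proc', 'rs' and 'state' keep the defaults (1 version, ROW bloom, 64 KB blocks).
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
    System.out.println(desc);
  }
}
```

The lines that follow show this region being instantiated and immediately closed once during bootstrap, then reopened on top of a freshly created FSHLog WAL under MasterData/WALs.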
2024-11-20T14:45:21,546 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732113921543Disabling compacts and flushes for region at 1732113921543Disabling writes for close at 1732113921545 (+2 ms)Writing region close event to WAL at 1732113921545Closed at 1732113921545 2024-11-20T14:45:21,549 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/.initializing 2024-11-20T14:45:21,549 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/WALs/1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:21,572 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C38341%2C1732113920243, suffix=, logDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/WALs/1a15ecfd95f4,38341,1732113920243, archiveDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/oldWALs, maxLogs=10 2024-11-20T14:45:21,580 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38341%2C1732113920243.1732113921576 2024-11-20T14:45:21,598 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/WALs/1a15ecfd95f4,38341,1732113920243/1a15ecfd95f4%2C38341%2C1732113920243.1732113921576 2024-11-20T14:45:21,606 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42161:42161),(127.0.0.1/127.0.0.1:44933:44933)] 2024-11-20T14:45:21,608 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:45:21,608 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:45:21,611 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,612 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,668 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T14:45:21,671 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:21,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:21,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T14:45:21,677 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:21,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:45:21,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T14:45:21,681 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:21,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:45:21,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,685 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T14:45:21,685 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:21,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:45:21,686 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,689 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,690 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,695 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,696 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,699 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T14:45:21,703 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:45:21,709 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:45:21,710 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698396, jitterRate=-0.11194416880607605}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T14:45:21,718 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732113921625Initializing all the Stores at 1732113921627 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732113921627Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732113921628 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732113921628Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732113921628Cleaning up temporary data from old regions at 1732113921696 (+68 ms)Region opened successfully at 1732113921718 (+22 ms) 2024-11-20T14:45:21,720 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T14:45:21,751 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2de223ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:45:21,780 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T14:45:21,791 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T14:45:21,791 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T14:45:21,794 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T14:45:21,796 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T14:45:21,800 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-20T14:45:21,801 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T14:45:21,825 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T14:45:21,833 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T14:45:21,876 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T14:45:21,879 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T14:45:21,881 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T14:45:21,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T14:45:21,894 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T14:45:21,898 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T14:45:21,910 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T14:45:21,912 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T14:45:21,918 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T14:45:21,935 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T14:45:21,943 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T14:45:21,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:45:21,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:45:21,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:21,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:21,956 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a15ecfd95f4,38341,1732113920243, sessionid=0x10159fe80800000, setting cluster-up flag (Was=false) 2024-11-20T14:45:21,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:21,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:22,010 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T14:45:22,013 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:22,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:22,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:22,060 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T14:45:22,064 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:22,073 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T14:45:22,140 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T14:45:22,150 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T14:45:22,151 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(746): ClusterId : 761c1661-0705-48e2-8fc2-b307034352fe 2024-11-20T14:45:22,153 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:45:22,157 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T14:45:22,162 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a15ecfd95f4,38341,1732113920243 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T14:45:22,186 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:45:22,186 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:45:22,187 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:45:22,187 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:45:22,187 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:45:22,187 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:45:22,188 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a15ecfd95f4:0, corePoolSize=10, maxPoolSize=10 2024-11-20T14:45:22,188 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,188 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:45:22,188 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,191 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732113952191 2024-11-20T14:45:22,193 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T14:45:22,194 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:45:22,194 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T14:45:22,194 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:45:22,194 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T14:45:22,195 DEBUG [RS:0;1a15ecfd95f4:38313 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142fea21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:45:22,198 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T14:45:22,199 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T14:45:22,199 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T14:45:22,200 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T14:45:22,201 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:22,201 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T14:45:22,202 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,207 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T14:45:22,209 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T14:45:22,209 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T14:45:22,211 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a15ecfd95f4:38313 2024-11-20T14:45:22,213 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:45:22,213 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:45:22,214 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(832): About to register with Master. 
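Editor's note: the descriptors printed above (master:store and the new hbase:meta) are built internally, but the same column-family attributes can be expressed with the public client API. A sketch assuming a current HBase client on the classpath; the table name 'example:store' and the single 'info' family are illustrative, not what the master actually executes here.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class DescriptorSketch {
      // Mirrors the 'info' family attributes printed in the log:
      // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
      public static TableDescriptor exampleDescriptor() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store")) // hypothetical name, not master:store
            .setColumnFamily(info)
            .build();
      }
    }
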
2024-11-20T14:45:22,215 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T14:45:22,215 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T14:45:22,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:45:22,217 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732113922217,5,FailOnTimeoutGroup] 2024-11-20T14:45:22,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:45:22,217 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,38341,1732113920243 with port=38313, startcode=1732113921003 2024-11-20T14:45:22,218 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732113922218,5,FailOnTimeoutGroup] 2024-11-20T14:45:22,218 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,218 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T14:45:22,220 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,220 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
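Editor's note: the HMaster line above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a positive threshold. A one-line sketch of enabling it; the threshold 3 is made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class RefCountRecoverySketch {
      public static Configuration withRefCountRecovery() {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the recovery described in the log line above; 3 is arbitrary.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
      }
    }
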
2024-11-20T14:45:22,223 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T14:45:22,224 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe 2024-11-20T14:45:22,233 DEBUG [RS:0;1a15ecfd95f4:38313 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:45:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:45:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:45:22,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:45:22,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:45:22,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming 
window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:45:22,246 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:22,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:22,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:45:22,251 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:45:22,251 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:22,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:22,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:45:22,256 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:45:22,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:22,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:22,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:45:22,261 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:45:22,261 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:22,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:22,263 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:45:22,264 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740 2024-11-20T14:45:22,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740 2024-11-20T14:45:22,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:45:22,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:45:22,269 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
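Editor's note: the FlushLargeStoresPolicy lines (32.0 M for master:store earlier, 16.0 M for hbase:meta here) come from a simple fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, the lower bound is the region's memstore flush heap size divided by the number of column families, exactly as the message states. A small worked sketch of that arithmetic using the sizes this log reports; the 64 MB meta flush size is inferred from 16 MB x 4 rather than printed directly.

    public final class FlushLowerBoundSketch {
      // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset on the table.
      static long lowerBound(long memstoreFlushHeapSize, int numFamilies) {
        return memstoreFlushHeapSize / numFamilies;
      }

      public static void main(String[] args) {
        // master:store: flushSize=134217728 (128 MB) with 4 families -> 33554432 (32 MB), matching the log.
        System.out.println(lowerBound(134_217_728L, 4));
        // hbase:meta: an inferred 64 MB flush size with 4 families -> 16777216 (16 MB),
        // matching FlushLargeStoresPolicy{flushSizeLowerBound=16777216} below.
        System.out.println(lowerBound(67_108_864L, 4));
      }
    }
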
2024-11-20T14:45:22,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:45:22,276 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:45:22,277 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849237, jitterRate=0.0798618346452713}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:45:22,281 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732113922239Initializing all the Stores at 1732113922241 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732113922241Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732113922242 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732113922242Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732113922242Cleaning up temporary data from old regions at 1732113922268 (+26 ms)Region opened successfully at 1732113922281 (+13 ms) 2024-11-20T14:45:22,281 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:45:22,283 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:45:22,284 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:45:22,284 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:45:22,284 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:45:22,285 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:45:22,286 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732113922281Disabling compacts and flushes for region at 1732113922281Disabling writes for close at 1732113922284 (+3 
ms)Writing region close event to WAL at 1732113922285 (+1 ms)Closed at 1732113922285 2024-11-20T14:45:22,289 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:45:22,289 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T14:45:22,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T14:45:22,309 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:45:22,310 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49689, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:45:22,313 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T14:45:22,317 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38341 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:22,320 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38341 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:22,333 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe 2024-11-20T14:45:22,334 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40825 2024-11-20T14:45:22,334 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:45:22,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:45:22,361 DEBUG [RS:0;1a15ecfd95f4:38313 {}] zookeeper.ZKUtil(111): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:22,361 WARN [RS:0;1a15ecfd95f4:38313 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
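Editor's note: the WAL configuration lines (blocksize=256 MB, rollsize=128 MB, maxLogs=10 for the master WAL earlier, and the same sizes with maxLogs=32 for the region server WAL below) follow the usual FSHLog pattern in which the roll size is the block size times a roll multiplier. The key names in the comment below are the conventional ones and are an assumption on my part; the log only prints the derived values.

    public final class WalRollSizeSketch {
      // Assumed keys (not printed in this log): hbase.regionserver.hlog.blocksize and
      // hbase.regionserver.logroll.multiplier; rollsize = blocksize * multiplier.
      static long rollSize(long blockSizeBytes, double multiplier) {
        return (long) (blockSizeBytes * multiplier);
      }

      public static void main(String[] args) {
        // A 256 MB block size with a 0.5 multiplier gives the 128 MB roll size reported in this log.
        System.out.println(rollSize(256L * 1024 * 1024, 0.5));
      }
    }
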
2024-11-20T14:45:22,361 INFO [RS:0;1a15ecfd95f4:38313 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:45:22,362 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:22,365 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,38313,1732113921003] 2024-11-20T14:45:22,391 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:45:22,407 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:45:22,411 INFO [RS:0;1a15ecfd95f4:38313 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:45:22,411 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,413 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:45:22,419 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:45:22,420 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,420 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,420 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,421 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,421 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,421 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,421 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:45:22,421 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,421 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,421 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:45:22,422 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,422 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,422 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:45:22,422 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:45:22,422 DEBUG [RS:0;1a15ecfd95f4:38313 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:45:22,425 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,425 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,426 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,426 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,426 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,426 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38313,1732113921003-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:45:22,443 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:45:22,445 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38313,1732113921003-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,445 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:22,445 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.Replication(171): 1a15ecfd95f4,38313,1732113921003 started 2024-11-20T14:45:22,461 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
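Editor's note: the MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M; the low mark is simply the limit scaled by the lower-limit fraction (0.95 by default, commonly configured via hbase.regionserver.global.memstore.size.lower.limit, an assumed key name since the log prints only the result). A quick check of that arithmetic:

    public final class MemStoreLowMarkSketch {
      public static void main(String[] args) {
        long limitMb = 880;               // globalMemStoreLimit from the log
        double lowerLimitFraction = 0.95; // assumed default fraction
        // 880 * 0.95 = 836, matching globalMemStoreLimitLowMark=836 M above.
        System.out.println(Math.round(limitMb * lowerLimitFraction));
      }
    }
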
2024-11-20T14:45:22,462 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,38313,1732113921003, RpcServer on 1a15ecfd95f4/172.17.0.2:38313, sessionid=0x10159fe80800001 2024-11-20T14:45:22,463 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:45:22,463 DEBUG [RS:0;1a15ecfd95f4:38313 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:22,463 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,38313,1732113921003' 2024-11-20T14:45:22,463 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:45:22,464 WARN [1a15ecfd95f4:38341 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-20T14:45:22,464 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:45:22,465 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:45:22,465 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:45:22,465 DEBUG [RS:0;1a15ecfd95f4:38313 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:22,465 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,38313,1732113921003' 2024-11-20T14:45:22,465 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:45:22,466 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:45:22,466 DEBUG [RS:0;1a15ecfd95f4:38313 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:45:22,466 INFO [RS:0;1a15ecfd95f4:38313 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:45:22,467 INFO [RS:0;1a15ecfd95f4:38313 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
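Editor's note: both quota managers above report that quota support is disabled. To the best of my knowledge the switch is hbase.quota.enabled (treat the key as an assumption; it is not printed in this log). A minimal sketch of turning it on for a test configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class QuotaSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // assumed key; quota support is off by default
        return conf;
      }
    }
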
2024-11-20T14:45:22,577 INFO [RS:0;1a15ecfd95f4:38313 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C38313%2C1732113921003, suffix=, logDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003, archiveDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs, maxLogs=32 2024-11-20T14:45:22,581 INFO [RS:0;1a15ecfd95f4:38313 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113922581 2024-11-20T14:45:22,590 INFO [RS:0;1a15ecfd95f4:38313 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113922581 2024-11-20T14:45:22,592 DEBUG [RS:0;1a15ecfd95f4:38313 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42161:42161),(127.0.0.1/127.0.0.1:44933:44933)] 2024-11-20T14:45:22,717 DEBUG [1a15ecfd95f4:38341 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T14:45:22,729 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:22,737 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,38313,1732113921003, state=OPENING 2024-11-20T14:45:22,793 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T14:45:22,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:22,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:45:22,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:45:22,803 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:45:22,804 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:45:22,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,38313,1732113921003}] 2024-11-20T14:45:22,986 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T14:45:22,991 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52873, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T14:45:23,007 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T14:45:23,008 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:45:23,012 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C38313%2C1732113921003.meta, suffix=.meta, logDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003, archiveDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs, maxLogs=32 2024-11-20T14:45:23,015 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.meta.1732113923015.meta 2024-11-20T14:45:23,027 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.meta.1732113923015.meta 2024-11-20T14:45:23,032 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44933:44933),(127.0.0.1/127.0.0.1:42161:42161)] 2024-11-20T14:45:23,034 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:45:23,037 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T14:45:23,041 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T14:45:23,048 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-20T14:45:23,054 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T14:45:23,055 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:45:23,056 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T14:45:23,056 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T14:45:23,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:45:23,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:45:23,065 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:23,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:23,067 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:45:23,069 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:45:23,070 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:23,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:23,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:45:23,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:45:23,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:23,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:45:23,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:45:23,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:45:23,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:23,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-20T14:45:23,080 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:45:23,082 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740 2024-11-20T14:45:23,086 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740 2024-11-20T14:45:23,089 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:45:23,089 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:45:23,091 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T14:45:23,094 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:45:23,097 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767196, jitterRate=-0.024460777640342712}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:45:23,097 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T14:45:23,099 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732113923057Writing region info on filesystem at 1732113923057Initializing all the Stores at 1732113923059 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732113923060 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732113923062 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732113923063 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732113923063Cleaning up temporary data from old regions at 1732113923090 (+27 ms)Running coprocessor post-open hooks at 1732113923097 (+7 ms)Region opened successfully at 1732113923099 (+2 ms) 2024-11-20T14:45:23,108 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732113922977 2024-11-20T14:45:23,120 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T14:45:23,121 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T14:45:23,122 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:23,125 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,38313,1732113921003, state=OPEN 2024-11-20T14:45:23,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:45:23,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:45:23,189 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:45:23,189 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:45:23,189 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:23,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T14:45:23,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,38313,1732113921003 in 382 msec 2024-11-20T14:45:23,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T14:45:23,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 899 msec 2024-11-20T14:45:23,203 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:45:23,203 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T14:45:23,220 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:45:23,221 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,38313,1732113921003, seqNum=-1] 2024-11-20T14:45:23,237 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:45:23,239 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58839, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:45:23,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1560 sec 2024-11-20T14:45:23,257 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732113923257, completionTime=-1 2024-11-20T14:45:23,259 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T14:45:23,260 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-20T14:45:23,282 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-20T14:45:23,283 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732113983283 2024-11-20T14:45:23,283 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732114043283 2024-11-20T14:45:23,283 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 23 msec 2024-11-20T14:45:23,285 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38341,1732113920243-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:23,286 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38341,1732113920243-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:23,286 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38341,1732113920243-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:23,287 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a15ecfd95f4:38341, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T14:45:23,288 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:23,288 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T14:45:23,295 DEBUG [master/1a15ecfd95f4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T14:45:23,319 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.099sec 2024-11-20T14:45:23,320 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T14:45:23,322 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T14:45:23,323 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T14:45:23,323 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T14:45:23,324 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T14:45:23,324 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38341,1732113920243-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:45:23,325 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38341,1732113920243-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T14:45:23,333 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T14:45:23,334 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T14:45:23,335 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38341,1732113920243-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T14:45:23,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1344fa2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:45:23,364 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T14:45:23,364 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T14:45:23,367 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a15ecfd95f4,38341,-1 for getting cluster id 2024-11-20T14:45:23,370 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T14:45:23,378 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '761c1661-0705-48e2-8fc2-b307034352fe' 2024-11-20T14:45:23,381 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T14:45:23,381 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "761c1661-0705-48e2-8fc2-b307034352fe" 2024-11-20T14:45:23,381 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3af0eb1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:45:23,381 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a15ecfd95f4,38341,-1] 2024-11-20T14:45:23,384 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T14:45:23,386 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:45:23,387 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T14:45:23,390 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cc8a654, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:45:23,390 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:45:23,396 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,38313,1732113921003, seqNum=-1] 2024-11-20T14:45:23,397 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:45:23,399 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50658, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:45:23,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:23,420 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:45:23,426 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T14:45:23,429 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T14:45:23,434 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a15ecfd95f4,38341,1732113920243 2024-11-20T14:45:23,437 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@86ad67a 2024-11-20T14:45:23,438 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T14:45:23,440 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34192, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T14:45:23,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38341 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T14:45:23,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38341 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-20T14:45:23,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38341 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:45:23,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38341 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-20T14:45:23,455 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T14:45:23,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38341 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-20T14:45:23,458 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:23,461 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T14:45:23,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38341 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:45:23,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741835_1011 (size=389) 2024-11-20T14:45:23,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741835_1011 (size=389) 2024-11-20T14:45:23,510 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9c948070de0ca5ef1cb1f0039dc4ad36, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe 2024-11-20T14:45:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741836_1012 (size=72) 2024-11-20T14:45:23,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741836_1012 (size=72) 2024-11-20T14:45:23,524 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:45:23,524 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9c948070de0ca5ef1cb1f0039dc4ad36, disabling compactions & flushes 2024-11-20T14:45:23,524 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:45:23,524 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:45:23,524 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. after waiting 0 ms 2024-11-20T14:45:23,524 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:45:23,524 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:45:23,524 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9c948070de0ca5ef1cb1f0039dc4ad36: Waiting for close lock at 1732113923524Disabling compacts and flushes for region at 1732113923524Disabling writes for close at 1732113923524Writing region close event to WAL at 1732113923524Closed at 1732113923524 2024-11-20T14:45:23,526 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T14:45:23,547 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732113923527"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732113923527"}]},"ts":"1732113923527"} 2024-11-20T14:45:23,552 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-20T14:45:23,555 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T14:45:23,558 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732113923555"}]},"ts":"1732113923555"} 2024-11-20T14:45:23,563 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-20T14:45:23,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9c948070de0ca5ef1cb1f0039dc4ad36, ASSIGN}] 2024-11-20T14:45:23,568 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9c948070de0ca5ef1cb1f0039dc4ad36, ASSIGN 2024-11-20T14:45:23,570 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9c948070de0ca5ef1cb1f0039dc4ad36, ASSIGN; state=OFFLINE, location=1a15ecfd95f4,38313,1732113921003; forceNewPlan=false, retain=false 2024-11-20T14:45:23,723 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9c948070de0ca5ef1cb1f0039dc4ad36, regionState=OPENING, regionLocation=1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:23,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9c948070de0ca5ef1cb1f0039dc4ad36, ASSIGN because future has completed 2024-11-20T14:45:23,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c948070de0ca5ef1cb1f0039dc4ad36, server=1a15ecfd95f4,38313,1732113921003}] 2024-11-20T14:45:23,894 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 
2024-11-20T14:45:23,895 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9c948070de0ca5ef1cb1f0039dc4ad36, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:45:23,895 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,895 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:45:23,895 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,896 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,898 INFO [StoreOpener-9c948070de0ca5ef1cb1f0039dc4ad36-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,901 INFO [StoreOpener-9c948070de0ca5ef1cb1f0039dc4ad36-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c948070de0ca5ef1cb1f0039dc4ad36 columnFamilyName info 2024-11-20T14:45:23,901 DEBUG [StoreOpener-9c948070de0ca5ef1cb1f0039dc4ad36-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:45:23,902 INFO [StoreOpener-9c948070de0ca5ef1cb1f0039dc4ad36-1 {}] regionserver.HStore(327): Store=9c948070de0ca5ef1cb1f0039dc4ad36/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:45:23,902 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,904 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,905 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,905 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,905 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,908 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,911 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:45:23,912 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9c948070de0ca5ef1cb1f0039dc4ad36; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868266, jitterRate=0.10405778884887695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T14:45:23,912 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:23,913 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9c948070de0ca5ef1cb1f0039dc4ad36: Running coprocessor pre-open hook at 1732113923896Writing region info on filesystem at 1732113923896Initializing all the Stores at 1732113923898 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732113923898Cleaning up temporary data from old regions at 1732113923905 (+7 ms)Running coprocessor post-open hooks at 1732113923912 (+7 ms)Region opened successfully at 1732113923913 (+1 ms) 2024-11-20T14:45:23,915 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36., pid=6, masterSystemTime=1732113923888 2024-11-20T14:45:23,918 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:45:23,919 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:45:23,920 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9c948070de0ca5ef1cb1f0039dc4ad36, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,38313,1732113921003 2024-11-20T14:45:23,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c948070de0ca5ef1cb1f0039dc4ad36, server=1a15ecfd95f4,38313,1732113921003 because future has completed 2024-11-20T14:45:23,930 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T14:45:23,932 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9c948070de0ca5ef1cb1f0039dc4ad36, server=1a15ecfd95f4,38313,1732113921003 in 195 msec 2024-11-20T14:45:23,934 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T14:45:23,934 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9c948070de0ca5ef1cb1f0039dc4ad36, ASSIGN in 365 msec 2024-11-20T14:45:23,935 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T14:45:23,936 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732113923935"}]},"ts":"1732113923935"} 2024-11-20T14:45:23,939 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-20T14:45:23,940 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T14:45:23,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 492 msec 2024-11-20T14:45:28,572 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T14:45:28,639 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T14:45:28,641 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-20T14:45:30,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T14:45:30,600 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T14:45:30,602 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-20T14:45:30,602 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T14:45:30,604 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:45:30,604 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T14:45:30,605 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T14:45:30,605 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T14:45:33,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38341 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:45:33,536 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-20T14:45:33,539 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-20T14:45:33,548 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-20T14:45:33,549 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 
2024-11-20T14:45:33,550 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113933549 2024-11-20T14:45:33,567 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:33,567 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:33,568 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:33,568 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:33,568 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:33,568 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113922581 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113933549 2024-11-20T14:45:33,571 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44933:44933),(127.0.0.1/127.0.0.1:42161:42161)] 2024-11-20T14:45:33,571 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113922581 is not closed yet, will try archiving it next time 2024-11-20T14:45:33,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741833_1009 (size=451) 2024-11-20T14:45:33,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741833_1009 (size=451) 2024-11-20T14:45:33,579 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113922581 to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs/1a15ecfd95f4%2C38313%2C1732113921003.1732113922581 2024-11-20T14:45:33,580 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36., hostname=1a15ecfd95f4,38313,1732113921003, seqNum=2] 2024-11-20T14:45:45,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38313 {}] regionserver.HRegion(8855): Flush requested on 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:45:45,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9c948070de0ca5ef1cb1f0039dc4ad36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:45:45,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/202732bc63bf46d788a55feb625e0fe5 is 1080, key is row0001/info:/1732113933585/Put/seqid=0 2024-11-20T14:45:45,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741838_1014 (size=12509) 2024-11-20T14:45:45,707 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741838_1014 (size=12509) 2024-11-20T14:45:45,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/202732bc63bf46d788a55feb625e0fe5 2024-11-20T14:45:45,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/202732bc63bf46d788a55feb625e0fe5 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5 2024-11-20T14:45:45,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5, entries=7, sequenceid=11, filesize=12.2 K 2024-11-20T14:45:45,769 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9c948070de0ca5ef1cb1f0039dc4ad36 in 124ms, sequenceid=11, compaction requested=false 2024-11-20T14:45:45,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9c948070de0ca5ef1cb1f0039dc4ad36: 2024-11-20T14:45:49,372 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T14:45:53,657 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113953656 2024-11-20T14:45:53,868 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:45:53,868 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:53,868 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:53,868 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:53,869 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:53,869 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:45:53,869 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113933549 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113953656 2024-11-20T14:45:53,870 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44933:44933),(127.0.0.1/127.0.0.1:42161:42161)] 2024-11-20T14:45:53,870 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113933549 is not closed yet, will try archiving it next time 2024-11-20T14:45:53,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741837_1013 (size=12399) 2024-11-20T14:45:53,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741837_1013 (size=12399) 2024-11-20T14:45:54,074 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:45:56,282 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:45:58,487 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:00,692 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:00,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38313 {}] regionserver.HRegion(8855): Flush requested on 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:46:00,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9c948070de0ca5ef1cb1f0039dc4ad36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:46:00,895 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:00,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/57e6960d01b048a595a26237a412392d is 1080, key is row0008/info:/1732113947644/Put/seqid=0 2024-11-20T14:46:00,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741840_1016 (size=12509) 2024-11-20T14:46:00,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741840_1016 (size=12509) 2024-11-20T14:46:00,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/57e6960d01b048a595a26237a412392d 2024-11-20T14:46:00,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/57e6960d01b048a595a26237a412392d as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/57e6960d01b048a595a26237a412392d 2024-11-20T14:46:00,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/57e6960d01b048a595a26237a412392d, entries=7, sequenceid=21, filesize=12.2 K 2024-11-20T14:46:01,144 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:01,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9c948070de0ca5ef1cb1f0039dc4ad36 in 
452ms, sequenceid=21, compaction requested=false 2024-11-20T14:46:01,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9c948070de0ca5ef1cb1f0039dc4ad36: 2024-11-20T14:46:01,145 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-20T14:46:01,145 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:46:01,146 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5 because midkey is the same as first or last row 2024-11-20T14:46:02,897 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:03,337 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T14:46:03,337 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T14:46:05,102 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:05,104 WARN [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:05,105 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C38313%2C1732113921003:(num 1732113953656) roll requested 2024-11-20T14:46:05,105 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113965105 2024-11-20T14:46:05,317 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:05,318 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:05,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:05,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:05,318 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:05,318 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-20T14:46:05,319 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113953656 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113965105 2024-11-20T14:46:05,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741839_1015 (size=7739) 2024-11-20T14:46:05,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741839_1015 (size=7739) 2024-11-20T14:46:05,330 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42161:42161),(127.0.0.1/127.0.0.1:44933:44933)] 2024-11-20T14:46:05,330 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113953656 is not closed yet, will try archiving it next time 2024-11-20T14:46:05,330 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113933549 to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs/1a15ecfd95f4%2C38313%2C1732113921003.1732113933549 2024-11-20T14:46:07,306 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK]] 2024-11-20T14:46:08,896 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9c948070de0ca5ef1cb1f0039dc4ad36, had cached 0 bytes from a total of 25018 2024-11-20T14:46:09,511 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK]] 2024-11-20T14:46:11,717 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK]] 2024-11-20T14:46:13,922 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK], 
DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK]] 2024-11-20T14:46:15,924 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T14:46:15,925 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113975925 2024-11-20T14:46:19,372 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T14:46:20,943 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5014 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK]] 2024-11-20T14:46:20,945 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5014 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK], DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK]] 2024-11-20T14:46:20,945 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C38313%2C1732113921003:(num 1732113975925) roll requested 2024-11-20T14:46:20,945 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:20,945 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:20,946 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:20,946 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:20,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:20,946 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113965105 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113975925 2024-11-20T14:46:20,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44933:44933),(127.0.0.1/127.0.0.1:42161:42161)] 2024-11-20T14:46:20,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113965105 is not closed yet, will try archiving it next time 2024-11-20T14:46:20,948 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 2024-11-20T14:46:20,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741841_1017 (size=4753) 2024-11-20T14:46:20,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741841_1017 (size=4753) 2024-11-20T14:46:25,950 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:25,950 WARN [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:25,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38313 {}] regionserver.HRegion(8855): Flush requested on 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:46:25,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9c948070de0ca5ef1cb1f0039dc4ad36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:46:25,957 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:25,957 WARN [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:27,952 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T14:46:30,953 INFO [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:30,953 WARN [FSHLog-0-hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe-prefix:1a15ecfd95f4,38313,1732113921003 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44309,DS-d901d07b-a071-473a-ad37-daf3a1d7bf83,DISK], DatanodeInfoWithStorage[127.0.0.1:40157,DS-1b6115b3-6eca-4b16-a34a-24043d3f08ec,DISK]] 2024-11-20T14:46:30,953 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,953 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,953 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,953 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,953 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,954 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113975925 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 2024-11-20T14:46:30,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741842_1018 (size=1569) 2024-11-20T14:46:30,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741842_1018 (size=1569) 2024-11-20T14:46:30,956 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42161:42161),(127.0.0.1/127.0.0.1:44933:44933)] 2024-11-20T14:46:30,956 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113975925 is not closed yet, will try archiving it next time 2024-11-20T14:46:30,956 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C38313%2C1732113921003:(num 1732113980947) roll requested 2024-11-20T14:46:30,957 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113990957 2024-11-20T14:46:30,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/ea66ec31b6ac4ab3ae4f574428b0570c is 1080, key is row0015/info:/1732113962694/Put/seqid=0 2024-11-20T14:46:30,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,966 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113990957 2024-11-20T14:46:30,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741845_1021 (size=12509) 2024-11-20T14:46:30,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741845_1021 (size=12509) 2024-11-20T14:46:30,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741843_1019 (size=93) 2024-11-20T14:46:30,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741843_1019 (size=93) 2024-11-20T14:46:30,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), 
to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/ea66ec31b6ac4ab3ae4f574428b0570c 2024-11-20T14:46:30,975 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42161:42161),(127.0.0.1/127.0.0.1:44933:44933)] 2024-11-20T14:46:30,975 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 is not closed yet, will try archiving it next time 2024-11-20T14:46:30,975 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38313%2C1732113921003.1732113990975 2024-11-20T14:46:30,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/ea66ec31b6ac4ab3ae4f574428b0570c as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/ea66ec31b6ac4ab3ae4f574428b0570c 2024-11-20T14:46:30,988 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,988 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,988 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,989 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,989 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:30,989 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113990957 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113990975 2024-11-20T14:46:30,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741844_1020 (size=1258) 2024-11-20T14:46:30,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741844_1020 (size=1258) 2024-11-20T14:46:30,993 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 is not closed yet, will try archiving it next time 2024-11-20T14:46:30,995 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44933:44933),(127.0.0.1/127.0.0.1:42161:42161)] 2024-11-20T14:46:30,995 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 is not closed yet, will try archiving it next time 2024-11-20T14:46:30,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/ea66ec31b6ac4ab3ae4f574428b0570c, entries=7, sequenceid=31, filesize=12.2 K 2024-11-20T14:46:30,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 9c948070de0ca5ef1cb1f0039dc4ad36 in 5046ms, sequenceid=31, compaction requested=true 2024-11-20T14:46:30,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9c948070de0ca5ef1cb1f0039dc4ad36: 2024-11-20T14:46:30,998 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-20T14:46:30,998 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:46:30,998 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5 because midkey is the same as first or last row 2024-11-20T14:46:30,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9c948070de0ca5ef1cb1f0039dc4ad36:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:46:31,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:46:31,001 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:46:31,004 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:46:31,005 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.HStore(1541): 9c948070de0ca5ef1cb1f0039dc4ad36/info is initiating minor compaction (all files) 2024-11-20T14:46:31,005 INFO [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9c948070de0ca5ef1cb1f0039dc4ad36/info in TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 
2024-11-20T14:46:31,005 INFO [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5, hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/57e6960d01b048a595a26237a412392d, hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/ea66ec31b6ac4ab3ae4f574428b0570c] into tmpdir=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp, totalSize=36.6 K 2024-11-20T14:46:31,007 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] compactions.Compactor(225): Compacting 202732bc63bf46d788a55feb625e0fe5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732113933585 2024-11-20T14:46:31,007 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57e6960d01b048a595a26237a412392d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732113947644 2024-11-20T14:46:31,008 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea66ec31b6ac4ab3ae4f574428b0570c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732113962694 2024-11-20T14:46:31,035 INFO [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9c948070de0ca5ef1cb1f0039dc4ad36#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:46:31,036 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/4ec8234dada44a5eaea13fb00ad40b06 is 1080, key is row0001/info:/1732113933585/Put/seqid=0 2024-11-20T14:46:31,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741847_1023 (size=27710) 2024-11-20T14:46:31,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741847_1023 (size=27710) 2024-11-20T14:46:31,052 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/4ec8234dada44a5eaea13fb00ad40b06 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/4ec8234dada44a5eaea13fb00ad40b06 2024-11-20T14:46:31,069 INFO [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9c948070de0ca5ef1cb1f0039dc4ad36/info of 9c948070de0ca5ef1cb1f0039dc4ad36 into 4ec8234dada44a5eaea13fb00ad40b06(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:46:31,069 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9c948070de0ca5ef1cb1f0039dc4ad36: 2024-11-20T14:46:31,071 INFO [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36., storeName=9c948070de0ca5ef1cb1f0039dc4ad36/info, priority=13, startTime=1732113990999; duration=0sec 2024-11-20T14:46:31,071 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T14:46:31,071 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:46:31,071 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/4ec8234dada44a5eaea13fb00ad40b06 because midkey is the same as first or last row 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/4ec8234dada44a5eaea13fb00ad40b06 because midkey is the same as first or last row 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/4ec8234dada44a5eaea13fb00ad40b06 because midkey is the same as first or last row 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:46:31,072 DEBUG [RS:0;1a15ecfd95f4:38313-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9c948070de0ca5ef1cb1f0039dc4ad36:info 2024-11-20T14:46:31,372 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113953656 to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs/1a15ecfd95f4%2C38313%2C1732113921003.1732113953656 2024-11-20T14:46:31,376 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113965105 to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs/1a15ecfd95f4%2C38313%2C1732113921003.1732113965105 2024-11-20T14:46:31,379 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113975925 to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs/1a15ecfd95f4%2C38313%2C1732113921003.1732113975925 2024-11-20T14:46:31,381 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/WALs/1a15ecfd95f4,38313,1732113921003/1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs/1a15ecfd95f4%2C38313%2C1732113921003.1732113980947 2024-11-20T14:46:43,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38313 {}] regionserver.HRegion(8855): Flush requested on 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:46:43,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9c948070de0ca5ef1cb1f0039dc4ad36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:46:43,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/5e902eae229c43cd868f031310a30d26 is 1080, key is row0022/info:/1732113990976/Put/seqid=0 2024-11-20T14:46:43,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741848_1024 (size=12509) 2024-11-20T14:46:43,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741848_1024 (size=12509) 2024-11-20T14:46:43,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/5e902eae229c43cd868f031310a30d26 2024-11-20T14:46:43,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/5e902eae229c43cd868f031310a30d26 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/5e902eae229c43cd868f031310a30d26 2024-11-20T14:46:43,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/5e902eae229c43cd868f031310a30d26, entries=7, sequenceid=42, filesize=12.2 K 2024-11-20T14:46:43,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9c948070de0ca5ef1cb1f0039dc4ad36 in 44ms, sequenceid=42, compaction requested=false 2024-11-20T14:46:43,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9c948070de0ca5ef1cb1f0039dc4ad36: 2024-11-20T14:46:43,058 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-20T14:46:43,058 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:46:43,058 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/4ec8234dada44a5eaea13fb00ad40b06 because midkey is the same as first or last row 2024-11-20T14:46:49,372 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T14:46:51,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T14:46:51,030 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T14:46:51,030 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:46:51,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:51,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:51,035 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-20T14:46:51,035 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T14:46:51,035 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=233511752, stopped=false 2024-11-20T14:46:51,036 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a15ecfd95f4,38341,1732113920243 2024-11-20T14:46:51,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:51,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:51,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:51,082 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:46:51,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:51,082 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:46:51,082 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:46:51,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:51,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:51,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:51,083 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,38313,1732113921003' ***** 2024-11-20T14:46:51,083 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:46:51,083 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:46:51,083 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:46:51,083 INFO [RS:0;1a15ecfd95f4:38313 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T14:46:51,084 INFO [RS:0;1a15ecfd95f4:38313 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T14:46:51,084 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(3091): Received CLOSE for 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:46:51,084 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,38313,1732113921003 2024-11-20T14:46:51,084 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:46:51,084 INFO [RS:0;1a15ecfd95f4:38313 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a15ecfd95f4:38313. 
2024-11-20T14:46:51,085 DEBUG [RS:0;1a15ecfd95f4:38313 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:46:51,085 DEBUG [RS:0;1a15ecfd95f4:38313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:51,085 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9c948070de0ca5ef1cb1f0039dc4ad36, disabling compactions & flushes 2024-11-20T14:46:51,085 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:46:51,085 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T14:46:51,085 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:46:51,085 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:46:51,085 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T14:46:51,085 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. after waiting 0 ms 2024-11-20T14:46:51,085 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 
2024-11-20T14:46:51,085 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T14:46:51,085 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9c948070de0ca5ef1cb1f0039dc4ad36 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-20T14:46:51,085 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T14:46:51,085 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9c948070de0ca5ef1cb1f0039dc4ad36=TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.} 2024-11-20T14:46:51,086 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:46:51,086 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:46:51,086 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:46:51,086 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:46:51,086 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:46:51,086 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:46:51,086 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-20T14:46:51,094 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/1a3f55f5df454920a57d740c4726c2ae is 1080, key is row0029/info:/1732114005018/Put/seqid=0 2024-11-20T14:46:51,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741849_1025 (size=8193) 2024-11-20T14:46:51,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741849_1025 (size=8193) 2024-11-20T14:46:51,113 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/info/5fb1d5bb752a4f8daaeed7847de7b24a is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36./info:regioninfo/1732113923920/Put/seqid=0 2024-11-20T14:46:51,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741850_1026 (size=7016) 2024-11-20T14:46:51,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40157 is added to blk_1073741850_1026 (size=7016) 2024-11-20T14:46:51,126 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/info/5fb1d5bb752a4f8daaeed7847de7b24a 2024-11-20T14:46:51,157 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/ns/0e6015a081d7426aba255ecb9eb3cf69 is 43, key is default/ns:d/1732113923243/Put/seqid=0 2024-11-20T14:46:51,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741851_1027 (size=5153) 2024-11-20T14:46:51,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741851_1027 (size=5153) 2024-11-20T14:46:51,166 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/ns/0e6015a081d7426aba255ecb9eb3cf69 2024-11-20T14:46:51,194 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/table/2dda13c837e94ba790656f296b087a71 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732113923935/Put/seqid=0 2024-11-20T14:46:51,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741852_1028 (size=5396) 2024-11-20T14:46:51,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741852_1028 (size=5396) 2024-11-20T14:46:51,202 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/table/2dda13c837e94ba790656f296b087a71 2024-11-20T14:46:51,214 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/info/5fb1d5bb752a4f8daaeed7847de7b24a as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/info/5fb1d5bb752a4f8daaeed7847de7b24a 2024-11-20T14:46:51,225 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/info/5fb1d5bb752a4f8daaeed7847de7b24a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-20T14:46:51,226 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/ns/0e6015a081d7426aba255ecb9eb3cf69 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/ns/0e6015a081d7426aba255ecb9eb3cf69 2024-11-20T14:46:51,237 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/ns/0e6015a081d7426aba255ecb9eb3cf69, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T14:46:51,239 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/.tmp/table/2dda13c837e94ba790656f296b087a71 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/table/2dda13c837e94ba790656f296b087a71 2024-11-20T14:46:51,251 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/table/2dda13c837e94ba790656f296b087a71, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T14:46:51,253 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 167ms, sequenceid=11, compaction requested=false 2024-11-20T14:46:51,265 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T14:46:51,269 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:46:51,269 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:46:51,269 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114011085Running coprocessor pre-close hooks at 1732114011085Disabling compacts and flushes for region at 1732114011086 (+1 ms)Disabling writes for close at 1732114011086Obtaining lock to block concurrent updates at 1732114011086Preparing flush snapshotting stores in 1588230740 at 1732114011086Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732114011087 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732114011088 (+1 ms)Flushing 1588230740/info: creating writer at 1732114011088Flushing 1588230740/info: appending metadata at 1732114011112 (+24 ms)Flushing 1588230740/info: closing flushed file at 1732114011112Flushing 1588230740/ns: creating writer at 1732114011136 (+24 ms)Flushing 1588230740/ns: appending metadata at 
1732114011157 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1732114011157Flushing 1588230740/table: creating writer at 1732114011175 (+18 ms)Flushing 1588230740/table: appending metadata at 1732114011193 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732114011193Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a6b9552: reopening flushed file at 1732114011212 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6679f9a0: reopening flushed file at 1732114011225 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@429dfdeb: reopening flushed file at 1732114011237 (+12 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 167ms, sequenceid=11, compaction requested=false at 1732114011253 (+16 ms)Writing region close event to WAL at 1732114011259 (+6 ms)Running coprocessor post-close hooks at 1732114011267 (+8 ms)Closed at 1732114011269 (+2 ms) 2024-11-20T14:46:51,270 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T14:46:51,286 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1351): Waiting on 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:46:51,486 DEBUG [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1351): Waiting on 9c948070de0ca5ef1cb1f0039dc4ad36 2024-11-20T14:46:51,503 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T14:46:51,503 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T14:46:51,504 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/1a3f55f5df454920a57d740c4726c2ae 2024-11-20T14:46:51,514 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/.tmp/info/1a3f55f5df454920a57d740c4726c2ae as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/1a3f55f5df454920a57d740c4726c2ae 2024-11-20T14:46:51,524 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/1a3f55f5df454920a57d740c4726c2ae, entries=3, sequenceid=48, filesize=8.0 K 2024-11-20T14:46:51,525 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9c948070de0ca5ef1cb1f0039dc4ad36 in 440ms, sequenceid=48, compaction requested=true 2024-11-20T14:46:51,526 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5, hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/57e6960d01b048a595a26237a412392d, hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/ea66ec31b6ac4ab3ae4f574428b0570c] to archive 2024-11-20T14:46:51,529 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T14:46:51,533 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5 to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/202732bc63bf46d788a55feb625e0fe5 2024-11-20T14:46:51,535 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/57e6960d01b048a595a26237a412392d to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/57e6960d01b048a595a26237a412392d 2024-11-20T14:46:51,537 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/ea66ec31b6ac4ab3ae4f574428b0570c to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/info/ea66ec31b6ac4ab3ae4f574428b0570c 2024-11-20T14:46:51,548 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a15ecfd95f4:38341 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-20T14:46:51,551 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [202732bc63bf46d788a55feb625e0fe5=12509, 57e6960d01b048a595a26237a412392d=12509, ea66ec31b6ac4ab3ae4f574428b0570c=12509] 2024-11-20T14:46:51,563 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/data/default/TestLogRolling-testSlowSyncLogRolling/9c948070de0ca5ef1cb1f0039dc4ad36/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-20T14:46:51,565 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:46:51,565 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9c948070de0ca5ef1cb1f0039dc4ad36: Waiting for close lock at 1732114011084Running coprocessor pre-close hooks at 1732114011085 (+1 ms)Disabling compacts and flushes for region at 1732114011085Disabling writes for close at 1732114011085Obtaining lock to block concurrent updates at 1732114011085Preparing flush snapshotting stores in 9c948070de0ca5ef1cb1f0039dc4ad36 at 1732114011085Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732114011086 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. at 1732114011087 (+1 ms)Flushing 9c948070de0ca5ef1cb1f0039dc4ad36/info: creating writer at 1732114011087Flushing 9c948070de0ca5ef1cb1f0039dc4ad36/info: appending metadata at 1732114011094 (+7 ms)Flushing 9c948070de0ca5ef1cb1f0039dc4ad36/info: closing flushed file at 1732114011094Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1dda3867: reopening flushed file at 1732114011512 (+418 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9c948070de0ca5ef1cb1f0039dc4ad36 in 440ms, sequenceid=48, compaction requested=true at 1732114011525 (+13 ms)Writing region close event to WAL at 1732114011552 (+27 ms)Running coprocessor post-close hooks at 1732114011565 (+13 ms)Closed at 1732114011565 2024-11-20T14:46:51,565 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732113923442.9c948070de0ca5ef1cb1f0039dc4ad36. 2024-11-20T14:46:51,687 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,38313,1732113921003; all regions closed. 
2024-11-20T14:46:51,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,689 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,689 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,689 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,689 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741834_1010 (size=3066) 2024-11-20T14:46:51,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741834_1010 (size=3066) 2024-11-20T14:46:51,697 DEBUG [RS:0;1a15ecfd95f4:38313 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs 2024-11-20T14:46:51,697 INFO [RS:0;1a15ecfd95f4:38313 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C38313%2C1732113921003.meta:.meta(num 1732113923015) 2024-11-20T14:46:51,698 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,698 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,698 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,698 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,698 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741846_1022 (size=13040) 2024-11-20T14:46:51,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741846_1022 (size=13040) 2024-11-20T14:46:51,707 DEBUG [RS:0;1a15ecfd95f4:38313 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/oldWALs 2024-11-20T14:46:51,707 INFO [RS:0;1a15ecfd95f4:38313 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C38313%2C1732113921003:(num 1732113990975) 2024-11-20T14:46:51,707 DEBUG [RS:0;1a15ecfd95f4:38313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:51,707 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:46:51,707 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:46:51,707 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T14:46:51,708 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:46:51,708 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:46:51,708 INFO [RS:0;1a15ecfd95f4:38313 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38313 2024-11-20T14:46:51,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:46:51,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,38313,1732113921003 2024-11-20T14:46:51,756 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:46:51,765 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,38313,1732113921003] 2024-11-20T14:46:51,773 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,38313,1732113921003 already deleted, retry=false 2024-11-20T14:46:51,773 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,38313,1732113921003 expired; onlineServers=0 2024-11-20T14:46:51,773 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a15ecfd95f4,38341,1732113920243' ***** 2024-11-20T14:46:51,773 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T14:46:51,773 INFO [M:0;1a15ecfd95f4:38341 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:46:51,774 INFO [M:0;1a15ecfd95f4:38341 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:46:51,774 DEBUG [M:0;1a15ecfd95f4:38341 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T14:46:51,774 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T14:46:51,774 DEBUG [M:0;1a15ecfd95f4:38341 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T14:46:51,774 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732113922218 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732113922218,5,FailOnTimeoutGroup] 2024-11-20T14:46:51,774 INFO [M:0;1a15ecfd95f4:38341 {}] hbase.ChoreService(370): Chore service for: master/1a15ecfd95f4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T14:46:51,774 INFO [M:0;1a15ecfd95f4:38341 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:46:51,774 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732113922217 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732113922217,5,FailOnTimeoutGroup] 2024-11-20T14:46:51,774 DEBUG [M:0;1a15ecfd95f4:38341 {}] master.HMaster(1795): Stopping service threads 2024-11-20T14:46:51,775 INFO [M:0;1a15ecfd95f4:38341 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T14:46:51,775 INFO [M:0;1a15ecfd95f4:38341 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:46:51,775 INFO [M:0;1a15ecfd95f4:38341 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T14:46:51,775 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T14:46:51,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T14:46:51,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:51,782 DEBUG [M:0;1a15ecfd95f4:38341 {}] zookeeper.ZKUtil(347): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T14:46:51,782 WARN [M:0;1a15ecfd95f4:38341 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T14:46:51,783 INFO [M:0;1a15ecfd95f4:38341 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/.lastflushedseqids 2024-11-20T14:46:51,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741853_1029 (size=130) 2024-11-20T14:46:51,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741853_1029 (size=130) 2024-11-20T14:46:51,798 INFO [M:0;1a15ecfd95f4:38341 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T14:46:51,798 INFO [M:0;1a15ecfd95f4:38341 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T14:46:51,798 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:46:51,798 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:51,798 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:51,798 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:46:51,798 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:51,799 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-20T14:46:51,820 DEBUG [M:0;1a15ecfd95f4:38341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fff4ee310efd41ec98493ad90bbc9f4b is 82, key is hbase:meta,,1/info:regioninfo/1732113923122/Put/seqid=0 2024-11-20T14:46:51,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741854_1030 (size=5672) 2024-11-20T14:46:51,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741854_1030 (size=5672) 2024-11-20T14:46:51,830 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fff4ee310efd41ec98493ad90bbc9f4b 2024-11-20T14:46:51,857 DEBUG [M:0;1a15ecfd95f4:38341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33bc4e116fb94bd38ff6f8f794211c48 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732113923942/Put/seqid=0 2024-11-20T14:46:51,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:51,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38313-0x10159fe80800001, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:51,866 INFO [RS:0;1a15ecfd95f4:38313 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:46:51,866 INFO [RS:0;1a15ecfd95f4:38313 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,38313,1732113921003; zookeeper connection closed. 
2024-11-20T14:46:51,867 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@268d11f5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@268d11f5 2024-11-20T14:46:51,867 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T14:46:51,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741855_1031 (size=6248) 2024-11-20T14:46:51,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741855_1031 (size=6248) 2024-11-20T14:46:51,873 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33bc4e116fb94bd38ff6f8f794211c48 2024-11-20T14:46:51,882 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33bc4e116fb94bd38ff6f8f794211c48 2024-11-20T14:46:51,900 DEBUG [M:0;1a15ecfd95f4:38341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4e9cb0e6b1f042cfbf454331d9cd95f6 is 69, key is 1a15ecfd95f4,38313,1732113921003/rs:state/1732113922322/Put/seqid=0 2024-11-20T14:46:51,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741856_1032 (size=5156) 2024-11-20T14:46:51,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741856_1032 (size=5156) 2024-11-20T14:46:51,909 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4e9cb0e6b1f042cfbf454331d9cd95f6 2024-11-20T14:46:51,933 DEBUG [M:0;1a15ecfd95f4:38341 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd1ded0750894e18a4ba99273c989eb6 is 52, key is load_balancer_on/state:d/1732113923423/Put/seqid=0 2024-11-20T14:46:51,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741857_1033 (size=5056) 2024-11-20T14:46:51,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741857_1033 (size=5056) 2024-11-20T14:46:51,941 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd1ded0750894e18a4ba99273c989eb6 2024-11-20T14:46:51,950 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fff4ee310efd41ec98493ad90bbc9f4b as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fff4ee310efd41ec98493ad90bbc9f4b 2024-11-20T14:46:51,957 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fff4ee310efd41ec98493ad90bbc9f4b, entries=8, sequenceid=59, filesize=5.5 K 2024-11-20T14:46:51,959 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/33bc4e116fb94bd38ff6f8f794211c48 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33bc4e116fb94bd38ff6f8f794211c48 2024-11-20T14:46:51,967 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 33bc4e116fb94bd38ff6f8f794211c48 2024-11-20T14:46:51,967 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/33bc4e116fb94bd38ff6f8f794211c48, entries=6, sequenceid=59, filesize=6.1 K 2024-11-20T14:46:51,969 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4e9cb0e6b1f042cfbf454331d9cd95f6 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4e9cb0e6b1f042cfbf454331d9cd95f6 2024-11-20T14:46:51,978 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4e9cb0e6b1f042cfbf454331d9cd95f6, entries=1, sequenceid=59, filesize=5.0 K 2024-11-20T14:46:51,979 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bd1ded0750894e18a4ba99273c989eb6 as hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bd1ded0750894e18a4ba99273c989eb6 2024-11-20T14:46:51,988 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bd1ded0750894e18a4ba99273c989eb6, entries=1, sequenceid=59, filesize=4.9 K 2024-11-20T14:46:51,989 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 191ms, 
sequenceid=59, compaction requested=false 2024-11-20T14:46:51,993 INFO [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:51,993 DEBUG [M:0;1a15ecfd95f4:38341 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114011798Disabling compacts and flushes for region at 1732114011798Disabling writes for close at 1732114011798Obtaining lock to block concurrent updates at 1732114011799 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732114011799Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1732114011799Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732114011800 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732114011800Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732114011820 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732114011820Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732114011838 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732114011856 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732114011856Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732114011882 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732114011899 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732114011899Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732114011916 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732114011932 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732114011932Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4628c71c: reopening flushed file at 1732114011948 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bef4181: reopening flushed file at 1732114011958 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6740e9a: reopening flushed file at 1732114011967 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31b4e138: reopening flushed file at 1732114011978 (+11 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 191ms, sequenceid=59, compaction requested=false at 1732114011989 (+11 ms)Writing region close event to WAL at 1732114011993 (+4 ms)Closed at 1732114011993 2024-11-20T14:46:51,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,994 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,994 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,994 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,994 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:51,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40157 is added to blk_1073741830_1006 (size=27985) 2024-11-20T14:46:51,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44309 is added to blk_1073741830_1006 (size=27985) 
2024-11-20T14:46:52,401 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T14:46:52,401 INFO [M:0;1a15ecfd95f4:38341 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T14:46:52,401 INFO [M:0;1a15ecfd95f4:38341 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38341 2024-11-20T14:46:52,401 INFO [M:0;1a15ecfd95f4:38341 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:46:52,432 INFO [regionserver/1a15ecfd95f4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:46:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:52,548 INFO [M:0;1a15ecfd95f4:38341 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:46:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38341-0x10159fe80800000, quorum=127.0.0.1:64781, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:52,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54b536b1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:52,559 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59ce19fe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:46:52,559 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:46:52,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b18aeba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:46:52,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6176039d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir/,STOPPED} 2024-11-20T14:46:52,563 WARN [BP-1758166626-172.17.0.2-1732113916695 heartbeating to localhost/127.0.0.1:40825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:46:52,563 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:46:52,563 WARN [BP-1758166626-172.17.0.2-1732113916695 heartbeating to localhost/127.0.0.1:40825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1758166626-172.17.0.2-1732113916695 (Datanode Uuid f0c5c159-a508-4a07-b95c-3b714d78dfae) service to localhost/127.0.0.1:40825 2024-11-20T14:46:52,563 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:46:52,565 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data3/current/BP-1758166626-172.17.0.2-1732113916695 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:52,565 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data4/current/BP-1758166626-172.17.0.2-1732113916695 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:52,566 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:46:52,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a47b0ed{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:52,568 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58445dea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:46:52,568 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:46:52,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fc5598e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:46:52,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74548cdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir/,STOPPED} 2024-11-20T14:46:52,570 WARN [BP-1758166626-172.17.0.2-1732113916695 heartbeating to localhost/127.0.0.1:40825 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:46:52,570 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:46:52,570 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:46:52,570 WARN [BP-1758166626-172.17.0.2-1732113916695 heartbeating to localhost/127.0.0.1:40825 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1758166626-172.17.0.2-1732113916695 (Datanode Uuid 9a5088b2-a1aa-47aa-9ccb-7b237a8c173d) service to localhost/127.0.0.1:40825 2024-11-20T14:46:52,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data1/current/BP-1758166626-172.17.0.2-1732113916695 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:52,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/cluster_9a6aee9f-51a9-ae75-ebb4-9fb7052c5316/data/data2/current/BP-1758166626-172.17.0.2-1732113916695 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:52,571 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:46:52,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c1a236c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:46:52,581 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:46:52,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:46:52,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:46:52,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir/,STOPPED} 2024-11-20T14:46:52,591 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T14:46:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T14:46:52,637 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40825 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:40825 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:40825 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/1a15ecfd95f4:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:40825 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:40825 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/1a15ecfd95f4:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:40825 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40825 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/1a15ecfd95f4:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@69147b9 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40825 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=304 (was 380), ProcessCount=11 (was 11), AvailableMemoryMB=11000 (was 10862) - AvailableMemoryMB LEAK? - 2024-11-20T14:46:52,644 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=304, ProcessCount=11, AvailableMemoryMB=10999 2024-11-20T14:46:52,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T14:46:52,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.log.dir so I do NOT create it in target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727 2024-11-20T14:46:52,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f9688ba8-bae0-617f-13f8-257c78926418/hadoop.tmp.dir so I do NOT create it in target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727 2024-11-20T14:46:52,645 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb, deleteOnExit=true 2024-11-20T14:46:52,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T14:46:52,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/test.cache.data in system properties and HBase conf 2024-11-20T14:46:52,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T14:46:52,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir in system properties and HBase conf 2024-11-20T14:46:52,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T14:46:52,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T14:46:52,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T14:46:52,646 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T14:46:52,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:46:52,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:46:52,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T14:46:52,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/nfs.dump.dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/java.io.tmpdir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T14:46:52,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T14:46:52,666 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:46:52,983 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:52,990 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:46:52,992 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:46:52,992 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:46:52,992 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:46:52,993 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:52,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65ee0588{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:46:52,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@246aaef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:46:53,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@13750752{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/java.io.tmpdir/jetty-localhost-40503-hadoop-hdfs-3_4_1-tests_jar-_-any-623262973350796735/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:46:53,132 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a90f125{HTTP/1.1, (http/1.1)}{localhost:40503} 2024-11-20T14:46:53,132 INFO [Time-limited test {}] server.Server(415): Started @98332ms 2024-11-20T14:46:53,149 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:46:53,419 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:53,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:46:53,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:46:53,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:46:53,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:46:53,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42736c41{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:46:53,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232f5aee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:46:53,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73f425bc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/java.io.tmpdir/jetty-localhost-39691-hadoop-hdfs-3_4_1-tests_jar-_-any-13284500042617078362/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:53,543 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72881875{HTTP/1.1, (http/1.1)}{localhost:39691} 2024-11-20T14:46:53,543 INFO [Time-limited test {}] server.Server(415): Started @98742ms 2024-11-20T14:46:53,545 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:46:53,588 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:53,594 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:46:53,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:46:53,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:46:53,595 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:46:53,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d1da8a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:46:53,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57a400a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:46:53,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e3eae67{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/java.io.tmpdir/jetty-localhost-36927-hadoop-hdfs-3_4_1-tests_jar-_-any-3934805666875923494/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:53,709 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4dcd5df2{HTTP/1.1, (http/1.1)}{localhost:36927} 2024-11-20T14:46:53,709 INFO [Time-limited test {}] server.Server(415): Started @98909ms 2024-11-20T14:46:53,711 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:46:54,283 WARN [Thread-445 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data1/current/BP-343060298-172.17.0.2-1732114012681/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:54,283 WARN [Thread-446 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data2/current/BP-343060298-172.17.0.2-1732114012681/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:54,305 WARN [Thread-410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:46:54,308 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac3f76306002dd67 with lease ID 0xa126745576f612fa: Processing first storage report for DS-b6bebc6c-3d47-46de-aa2b-e3a7ebe73efa from datanode DatanodeRegistration(127.0.0.1:41613, datanodeUuid=d05ee728-e7cd-4549-ae65-5a14b2b7ec62, infoPort=42197, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681) 2024-11-20T14:46:54,309 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac3f76306002dd67 with lease ID 0xa126745576f612fa: from storage DS-b6bebc6c-3d47-46de-aa2b-e3a7ebe73efa node DatanodeRegistration(127.0.0.1:41613, datanodeUuid=d05ee728-e7cd-4549-ae65-5a14b2b7ec62, infoPort=42197, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:46:54,309 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac3f76306002dd67 with lease ID 0xa126745576f612fa: Processing first storage report for DS-d36ac25a-c0c1-460f-ab70-8dc40b7e3b10 from datanode DatanodeRegistration(127.0.0.1:41613, datanodeUuid=d05ee728-e7cd-4549-ae65-5a14b2b7ec62, infoPort=42197, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681) 2024-11-20T14:46:54,309 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac3f76306002dd67 with lease ID 0xa126745576f612fa: from storage DS-d36ac25a-c0c1-460f-ab70-8dc40b7e3b10 node DatanodeRegistration(127.0.0.1:41613, datanodeUuid=d05ee728-e7cd-4549-ae65-5a14b2b7ec62, infoPort=42197, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:46:54,528 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data3/current/BP-343060298-172.17.0.2-1732114012681/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:54,531 WARN [Thread-458 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data4/current/BP-343060298-172.17.0.2-1732114012681/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:54,548 WARN [Thread-433 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:46:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb61d2a79c0d5519 with lease ID 0xa126745576f612fb: Processing first storage report for DS-de6af2b4-f52f-412f-a309-e0408c6c2c5a from datanode DatanodeRegistration(127.0.0.1:43785, datanodeUuid=82fec9c8-8860-43ed-b9cb-a627f4df0822, infoPort=33181, infoSecurePort=0, ipcPort=40837, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681) 2024-11-20T14:46:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb61d2a79c0d5519 with lease ID 0xa126745576f612fb: from storage DS-de6af2b4-f52f-412f-a309-e0408c6c2c5a node DatanodeRegistration(127.0.0.1:43785, datanodeUuid=82fec9c8-8860-43ed-b9cb-a627f4df0822, infoPort=33181, infoSecurePort=0, ipcPort=40837, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:46:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb61d2a79c0d5519 with lease ID 0xa126745576f612fb: Processing first storage report for DS-21a02830-bb13-4079-bb64-d9ce3f3d39e2 from datanode DatanodeRegistration(127.0.0.1:43785, datanodeUuid=82fec9c8-8860-43ed-b9cb-a627f4df0822, infoPort=33181, infoSecurePort=0, ipcPort=40837, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681) 2024-11-20T14:46:54,551 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb61d2a79c0d5519 with lease ID 0xa126745576f612fb: from storage DS-21a02830-bb13-4079-bb64-d9ce3f3d39e2 node DatanodeRegistration(127.0.0.1:43785, datanodeUuid=82fec9c8-8860-43ed-b9cb-a627f4df0822, infoPort=33181, infoSecurePort=0, ipcPort=40837, storageInfo=lv=-57;cid=testClusterID;nsid=789200181;c=1732114012681), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T14:46:54,652 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727 2024-11-20T14:46:54,655 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/zookeeper_0, clientPort=62813, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T14:46:54,656 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62813 2024-11-20T14:46:54,656 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:54,658 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:54,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:46:54,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:46:54,671 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da with version=8 2024-11-20T14:46:54,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase-staging 2024-11-20T14:46:54,673 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:46:54,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:54,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:54,673 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:46:54,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:54,674 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:46:54,674 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T14:46:54,674 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:46:54,675 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43497 2024-11-20T14:46:54,677 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43497 connecting to ZooKeeper ensemble=127.0.0.1:62813 2024-11-20T14:46:54,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:434970x0, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:46:54,728 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43497-0x10159fff4830000 connected 2024-11-20T14:46:54,798 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:54,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:54,805 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:54,805 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da, hbase.cluster.distributed=false 2024-11-20T14:46:54,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:46:54,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43497 2024-11-20T14:46:54,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43497 2024-11-20T14:46:54,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43497 2024-11-20T14:46:54,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43497 2024-11-20T14:46:54,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43497 2024-11-20T14:46:54,828 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:46:54,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:54,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:54,829 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:46:54,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:54,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:46:54,829 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:46:54,829 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:46:54,830 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36561 2024-11-20T14:46:54,832 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36561 connecting to ZooKeeper ensemble=127.0.0.1:62813 2024-11-20T14:46:54,833 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:54,835 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:54,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365610x0, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:46:54,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:365610x0, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:54,848 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36561-0x10159fff4830001 connected 2024-11-20T14:46:54,848 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:46:54,849 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:46:54,850 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T14:46:54,851 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:46:54,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36561 2024-11-20T14:46:54,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36561 2024-11-20T14:46:54,854 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36561 2024-11-20T14:46:54,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36561 2024-11-20T14:46:54,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36561 2024-11-20T14:46:54,870 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a15ecfd95f4:43497 2024-11-20T14:46:54,871 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:54,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:54,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:54,881 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:54,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T14:46:54,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:54,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:54,890 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:46:54,891 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a15ecfd95f4,43497,1732114014673 from backup master directory 2024-11-20T14:46:54,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:54,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:54,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:54,898 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-20T14:46:54,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:54,904 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/hbase.id] with ID: 970d4de5-b67c-4786-9ac0-99a0a0eee071 2024-11-20T14:46:54,904 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/.tmp/hbase.id 2024-11-20T14:46:54,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:46:54,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:46:54,912 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/.tmp/hbase.id]:[hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/hbase.id] 2024-11-20T14:46:54,929 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:54,930 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T14:46:54,932 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-20T14:46:54,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:54,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:54,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:46:54,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:46:54,951 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:46:54,953 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T14:46:54,955 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:46:54,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:46:54,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:46:54,972 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store 2024-11-20T14:46:54,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:46:54,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:46:54,982 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:54,983 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:46:54,983 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:54,983 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:54,983 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:46:54,983 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:54,983 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T14:46:54,983 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114014982Disabling compacts and flushes for region at 1732114014982Disabling writes for close at 1732114014983 (+1 ms)Writing region close event to WAL at 1732114014983Closed at 1732114014983 2024-11-20T14:46:54,985 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/.initializing 2024-11-20T14:46:54,985 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/WALs/1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:54,989 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C43497%2C1732114014673, suffix=, logDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/WALs/1a15ecfd95f4,43497,1732114014673, archiveDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/oldWALs, maxLogs=10 2024-11-20T14:46:54,990 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C43497%2C1732114014673.1732114014989 2024-11-20T14:46:54,997 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/WALs/1a15ecfd95f4,43497,1732114014673/1a15ecfd95f4%2C43497%2C1732114014673.1732114014989 2024-11-20T14:46:54,999 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42197:42197),(127.0.0.1/127.0.0.1:33181:33181)] 2024-11-20T14:46:55,003 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:46:55,003 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:55,003 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,003 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T14:46:55,007 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T14:46:55,010 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:46:55,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T14:46:55,014 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:46:55,015 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T14:46:55,017 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:46:55,017 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,018 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,018 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,020 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,020 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,021 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T14:46:55,022 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:55,027 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:46:55,028 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690648, jitterRate=-0.12179650366306305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T14:46:55,030 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732114015004Initializing all the Stores at 1732114015005 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114015005Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114015005Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114015005Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114015005Cleaning up temporary data from old regions at 1732114015020 (+15 ms)Region opened successfully at 1732114015030 (+10 ms) 2024-11-20T14:46:55,030 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T14:46:55,039 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17ece9bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:46:55,040 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T14:46:55,041 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T14:46:55,041 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T14:46:55,041 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T14:46:55,042 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T14:46:55,042 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T14:46:55,043 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T14:46:55,047 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T14:46:55,048 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T14:46:55,071 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T14:46:55,071 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T14:46:55,072 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T14:46:55,081 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T14:46:55,081 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T14:46:55,082 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T14:46:55,089 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T14:46:55,090 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T14:46:55,097 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T14:46:55,100 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T14:46:55,106 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T14:46:55,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:55,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:55,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,117 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a15ecfd95f4,43497,1732114014673, sessionid=0x10159fff4830000, setting cluster-up flag (Was=false) 2024-11-20T14:46:55,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,156 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T14:46:55,158 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:55,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,197 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T14:46:55,200 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:55,203 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T14:46:55,206 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:55,206 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T14:46:55,206 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T14:46:55,206 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a15ecfd95f4,43497,1732114014673 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T14:46:55,209 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:55,209 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:55,209 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:55,209 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:55,209 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a15ecfd95f4:0, corePoolSize=10, maxPoolSize=10 2024-11-20T14:46:55,209 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,210 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:46:55,210 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:46:55,213 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:55,213 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T14:46:55,213 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732114045213 2024-11-20T14:46:55,213 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T14:46:55,213 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T14:46:55,213 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T14:46:55,213 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T14:46:55,213 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T14:46:55,214 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T14:46:55,214 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:55,214 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T14:46:55,214 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,214 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T14:46:55,215 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T14:46:55,215 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T14:46:55,215 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T14:46:55,215 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T14:46:55,216 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114015216,5,FailOnTimeoutGroup] 2024-11-20T14:46:55,216 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114015216,5,FailOnTimeoutGroup] 2024-11-20T14:46:55,216 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,216 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T14:46:55,216 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,216 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:46:55,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:46:55,223 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T14:46:55,224 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da 2024-11-20T14:46:55,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:46:55,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:46:55,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:55,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:46:55,241 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:46:55,241 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:46:55,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:46:55,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:46:55,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:46:55,247 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:46:55,250 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:46:55,250 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:46:55,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740 2024-11-20T14:46:55,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740 2024-11-20T14:46:55,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:46:55,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:46:55,254 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T14:46:55,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:46:55,258 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(746): ClusterId : 970d4de5-b67c-4786-9ac0-99a0a0eee071 2024-11-20T14:46:55,258 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:46:55,259 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:46:55,259 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782401, jitterRate=-0.005125775933265686}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:46:55,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732114015238Initializing all the Stores at 1732114015239 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114015239Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114015239Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114015239Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114015239Cleaning up temporary data from old regions at 1732114015253 (+14 ms)Region opened successfully at 1732114015261 (+8 ms) 2024-11-20T14:46:55,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:46:55,261 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:46:55,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:46:55,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:46:55,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 
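
The messages above name two configuration keys: hbase.regions.recovery.store.file.ref.count (left unset in this run, so reopening regions with a very high store file reference count stays disabled) and hbase.hregion.percolumnfamilyflush.size.lower.bound (absent from the hbase:meta descriptor, so FlushLargeStoresPolicy falls back to memstore flush size divided by the number of families, 16.0 M here). A minimal sketch of setting them programmatically, assuming the standard Hadoop/HBase Configuration API; the class name and the threshold and size values below are illustrative only and are not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ExampleTuning {
      public static Configuration build() {
        // Start from the usual HBase site/client configuration.
        Configuration conf = HBaseConfiguration.create();
        // A threshold > 0 enables reopening regions whose store file
        // reference count grows too high (disabled in the log above).
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        // Illustrative per-column-family flush lower bound; the log notes this
        // value may also be carried in the table descriptor rather than here.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16 * 1024 * 1024L);
        return conf;
      }
    }
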
2024-11-20T14:46:55,263 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:46:55,263 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114015261Disabling compacts and flushes for region at 1732114015261Disabling writes for close at 1732114015261Writing region close event to WAL at 1732114015263 (+2 ms)Closed at 1732114015263 2024-11-20T14:46:55,265 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:55,265 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T14:46:55,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T14:46:55,268 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:46:55,270 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T14:46:55,282 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:46:55,282 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:46:55,290 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:46:55,291 DEBUG [RS:0;1a15ecfd95f4:36561 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aaf38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:46:55,303 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a15ecfd95f4:36561 2024-11-20T14:46:55,303 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:46:55,303 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:46:55,303 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T14:46:55,305 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,43497,1732114014673 with port=36561, startcode=1732114014828 2024-11-20T14:46:55,305 DEBUG [RS:0;1a15ecfd95f4:36561 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:46:55,307 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34225, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:46:55,308 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43497 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,308 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43497 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,310 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da 2024-11-20T14:46:55,310 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34345 2024-11-20T14:46:55,311 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:46:55,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:46:55,321 DEBUG [RS:0;1a15ecfd95f4:36561 {}] zookeeper.ZKUtil(111): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,321 WARN [RS:0;1a15ecfd95f4:36561 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:46:55,321 INFO [RS:0;1a15ecfd95f4:36561 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:46:55,322 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,36561,1732114014828] 2024-11-20T14:46:55,322 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/WALs/1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,325 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:46:55,328 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:46:55,331 INFO [RS:0;1a15ecfd95f4:36561 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:46:55,331 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:55,335 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:46:55,336 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:46:55,336 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,337 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,337 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,337 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,337 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,337 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,337 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:46:55,337 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,338 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,338 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,338 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,338 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,338 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:55,338 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:46:55,338 DEBUG [RS:0;1a15ecfd95f4:36561 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:46:55,339 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:55,339 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,339 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,340 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,340 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,340 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,36561,1732114014828-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:46:55,364 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:46:55,364 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,36561,1732114014828-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,364 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,364 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.Replication(171): 1a15ecfd95f4,36561,1732114014828 started 2024-11-20T14:46:55,388 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:55,389 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,36561,1732114014828, RpcServer on 1a15ecfd95f4/172.17.0.2:36561, sessionid=0x10159fff4830001 2024-11-20T14:46:55,389 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:46:55,389 DEBUG [RS:0;1a15ecfd95f4:36561 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,389 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,36561,1732114014828' 2024-11-20T14:46:55,389 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:46:55,390 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:46:55,391 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:46:55,391 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:46:55,391 DEBUG [RS:0;1a15ecfd95f4:36561 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,391 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,36561,1732114014828' 2024-11-20T14:46:55,391 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:46:55,392 DEBUG 
[RS:0;1a15ecfd95f4:36561 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:46:55,392 DEBUG [RS:0;1a15ecfd95f4:36561 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:46:55,392 INFO [RS:0;1a15ecfd95f4:36561 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:46:55,392 INFO [RS:0;1a15ecfd95f4:36561 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T14:46:55,421 WARN [1a15ecfd95f4:43497 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-20T14:46:55,496 INFO [RS:0;1a15ecfd95f4:36561 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C36561%2C1732114014828, suffix=, logDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/WALs/1a15ecfd95f4,36561,1732114014828, archiveDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/oldWALs, maxLogs=32 2024-11-20T14:46:55,499 INFO [RS:0;1a15ecfd95f4:36561 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C36561%2C1732114014828.1732114015499 2024-11-20T14:46:55,508 INFO [RS:0;1a15ecfd95f4:36561 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/WALs/1a15ecfd95f4,36561,1732114014828/1a15ecfd95f4%2C36561%2C1732114014828.1732114015499 2024-11-20T14:46:55,510 DEBUG [RS:0;1a15ecfd95f4:36561 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42197:42197),(127.0.0.1/127.0.0.1:33181:33181)] 2024-11-20T14:46:55,671 DEBUG [1a15ecfd95f4:43497 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T14:46:55,672 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,675 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,36561,1732114014828, state=OPENING 2024-11-20T14:46:55,697 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T14:46:55,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:55,749 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:46:55,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,36561,1732114014828}] 2024-11-20T14:46:55,749 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:55,749 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:55,905 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T14:46:55,908 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35053, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T14:46:55,913 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T14:46:55,914 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:46:55,916 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C36561%2C1732114014828.meta, suffix=.meta, logDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/WALs/1a15ecfd95f4,36561,1732114014828, archiveDir=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/oldWALs, maxLogs=32 2024-11-20T14:46:55,918 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C36561%2C1732114014828.meta.1732114015918.meta 2024-11-20T14:46:55,925 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/WALs/1a15ecfd95f4,36561,1732114014828/1a15ecfd95f4%2C36561%2C1732114014828.meta.1732114015918.meta 2024-11-20T14:46:55,926 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42197:42197),(127.0.0.1/127.0.0.1:33181:33181)] 2024-11-20T14:46:55,926 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:46:55,927 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T14:46:55,927 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T14:46:55,927 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-20T14:46:55,927 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T14:46:55,927 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:55,927 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T14:46:55,927 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T14:46:55,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:46:55,931 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:46:55,931 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:46:55,933 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:46:55,933 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:46:55,934 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:46:55,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:55,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:46:55,936 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:46:55,937 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:55,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-20T14:46:55,937 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:46:55,938 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740 2024-11-20T14:46:55,940 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740 2024-11-20T14:46:55,942 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:46:55,942 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:46:55,942 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T14:46:55,944 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:46:55,945 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=823880, jitterRate=0.04761865735054016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:46:55,946 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T14:46:55,947 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732114015928Writing region info on filesystem at 1732114015928Initializing all the Stores at 1732114015929 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114015929Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114015929Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114015929Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114015929Cleaning up temporary data from old regions at 1732114015942 (+13 ms)Running coprocessor post-open hooks at 1732114015946 (+4 ms)Region opened successfully at 1732114015947 (+1 ms) 2024-11-20T14:46:55,948 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732114015904 2024-11-20T14:46:55,951 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T14:46:55,951 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T14:46:55,952 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:55,953 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,36561,1732114014828, state=OPEN 2024-11-20T14:46:56,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:46:56,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:46:56,006 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:56,006 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:56,006 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:56,010 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T14:46:56,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,36561,1732114014828 in 257 msec 2024-11-20T14:46:56,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T14:46:56,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 746 msec 2024-11-20T14:46:56,016 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:56,016 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T14:46:56,018 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:46:56,018 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,36561,1732114014828, seqNum=-1] 2024-11-20T14:46:56,019 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:46:56,021 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52095, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:46:56,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 822 msec 2024-11-20T14:46:56,030 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732114016030, completionTime=-1 2024-11-20T14:46:56,030 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T14:46:56,030 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-20T14:46:56,032 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-20T14:46:56,032 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732114076032 2024-11-20T14:46:56,033 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732114136033 2024-11-20T14:46:56,033 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-20T14:46:56,033 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,43497,1732114014673-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:56,033 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,43497,1732114014673-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:56,033 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,43497,1732114014673-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:56,033 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a15ecfd95f4:43497, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:56,033 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:56,034 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:56,036 DEBUG [master/1a15ecfd95f4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T14:46:56,039 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.141sec 2024-11-20T14:46:56,039 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T14:46:56,039 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T14:46:56,039 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T14:46:56,039 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T14:46:56,039 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T14:46:56,039 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,43497,1732114014673-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:46:56,040 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,43497,1732114014673-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T14:46:56,043 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T14:46:56,043 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T14:46:56,043 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,43497,1732114014673-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:56,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f96645b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:46:56,059 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a15ecfd95f4,43497,-1 for getting cluster id 2024-11-20T14:46:56,059 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T14:46:56,061 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '970d4de5-b67c-4786-9ac0-99a0a0eee071' 2024-11-20T14:46:56,061 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T14:46:56,062 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "970d4de5-b67c-4786-9ac0-99a0a0eee071" 2024-11-20T14:46:56,062 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e2defca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:46:56,062 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a15ecfd95f4,43497,-1] 2024-11-20T14:46:56,062 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T14:46:56,067 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:56,068 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T14:46:56,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ae7cc66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:46:56,070 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:46:56,071 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,36561,1732114014828, seqNum=-1] 2024-11-20T14:46:56,071 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:46:56,077 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39266, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:46:56,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:56,080 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:56,083 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T14:46:56,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T14:46:56,083 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:46:56,083 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:46:56,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:56,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:56,084 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T14:46:56,084 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T14:46:56,084 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1891077224, stopped=false 2024-11-20T14:46:56,084 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a15ecfd95f4,43497,1732114014673 2024-11-20T14:46:56,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:56,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:56,106 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:46:56,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:56,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:56,106 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
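Before the shutdown begins, the test client builds an async connection: it asks the master's ConnectionRegistryService for the cluster id, creates stubs against ClientMetaService, fetches the hbase:meta region location, and later closes the connection, which emits the "Connection has been closed by ..." message plus a DEBUG call stack and stops the underlying RPC client. A rough client-side equivalent of those steps is sketched below, assuming an HBase client on the classpath and a reachable cluster; the commented-out quorum setting is an assumption, not taken from this run.

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // conf.set("hbase.zookeeper.quorum", "127.0.0.1");  // illustrative only

    // Building the connection triggers the cluster-id fetch from the registry.
    CompletableFuture<AsyncConnection> future = ConnectionFactory.createAsyncConnection(conf);
    try (AsyncConnection conn = future.get()) {
      // Equivalent of "Start fetching meta region location from registry".
      HRegionLocation metaLocation = conn.getRegionLocator(TableName.META_TABLE_NAME)
          .getRegionLocation(new byte[0])
          .get();
      System.out.println("hbase:meta is on " + metaLocation.getServerName());
    } // close() logs the call stack at DEBUG and stops the rpc client
  }
}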
2024-11-20T14:46:56,106 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:46:56,106 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:56,106 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:56,107 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,36561,1732114014828' ***** 2024-11-20T14:46:56,107 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:46:56,107 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:56,107 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:46:56,107 INFO [RS:0;1a15ecfd95f4:36561 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T14:46:56,107 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:46:56,107 INFO [RS:0;1a15ecfd95f4:36561 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T14:46:56,107 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:56,107 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:46:56,107 INFO [RS:0;1a15ecfd95f4:36561 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a15ecfd95f4:36561. 2024-11-20T14:46:56,107 DEBUG [RS:0;1a15ecfd95f4:36561 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:46:56,107 DEBUG [RS:0;1a15ecfd95f4:36561 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:56,108 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
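Both call stacks above bottom out in AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster: the JUnit @After hook closes the shared connection and then asks the single-process cluster to stop its master and region server. The overall shape of such a test is sketched below, assuming the hbase-testing-util module and JUnit 4 (in releases before HBase 3.0 the class is named HBaseTestingUtility; the start/shutdown calls follow the same pattern).

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterTestSketch {

  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts ZooKeeper, a mini-DFS and HBase in-process ("Minicluster is up").
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared connection, then stops master, region servers,
    // DFS and ZooKeeper, producing the shutdown sequence in this log.
    util.shutdownMiniCluster();
  }

  @Test
  public void testAgainstTheCluster() throws Exception {
    // Create tables, write data, kill datanodes, assert on WAL behaviour, etc.
  }
}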
2024-11-20T14:46:56,108 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:46:56,108 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T14:46:56,108 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T14:46:56,108 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T14:46:56,108 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T14:46:56,108 DEBUG [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T14:46:56,108 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:46:56,108 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:46:56,108 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:46:56,108 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:46:56,108 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:46:56,109 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T14:46:56,127 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740/.tmp/ns/4c17988e3ad44482b65719fed0ba4bbc is 43, key is default/ns:d/1732114016022/Put/seqid=0 2024-11-20T14:46:56,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741835_1011 (size=5153) 2024-11-20T14:46:56,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741835_1011 (size=5153) 2024-11-20T14:46:56,134 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740/.tmp/ns/4c17988e3ad44482b65719fed0ba4bbc 2024-11-20T14:46:56,142 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740/.tmp/ns/4c17988e3ad44482b65719fed0ba4bbc as hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740/ns/4c17988e3ad44482b65719fed0ba4bbc 2024-11-20T14:46:56,150 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740/ns/4c17988e3ad44482b65719fed0ba4bbc, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T14:46:56,152 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-20T14:46:56,152 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T14:46:56,159 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T14:46:56,160 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:46:56,160 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:46:56,160 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114016108Running coprocessor pre-close hooks at 1732114016108Disabling compacts and flushes for region at 1732114016108Disabling writes for close at 1732114016108Obtaining lock to block concurrent updates at 1732114016109 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732114016109Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732114016109Flushing stores of hbase:meta,,1.1588230740 at 1732114016110 (+1 ms)Flushing 1588230740/ns: creating writer at 1732114016110Flushing 1588230740/ns: appending metadata at 1732114016127 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732114016127Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38972229: reopening flushed file at 1732114016141 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1732114016152 (+11 ms)Writing region close event to WAL at 1732114016155 (+3 ms)Running coprocessor post-close hooks at 1732114016159 (+4 ms)Closed at 1732114016160 (+1 ms) 2024-11-20T14:46:56,160 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T14:46:56,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:46:56,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:46:56,308 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,36561,1732114014828; all regions closed. 
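Closing hbase:meta first flushes its 74-byte memstore: a temporary HFile is written under .tmp/ns, committed into the ns column family, and a recovered.edits/9.seqid marker records the new max sequence id before the region close journal is printed. The same flush path can also be driven explicitly from a client, which is sometimes useful in tests; a hedged sketch using the Admin API follows (connection details are assumed, not taken from this run).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces the memstore to be written out as an HFile, the same
      // tmp-file-then-commit path that the region close above goes through.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}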
2024-11-20T14:46:56,309 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,309 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,309 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,309 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,309 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741834_1010 (size=1152) 2024-11-20T14:46:56,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741834_1010 (size=1152) 2024-11-20T14:46:56,315 DEBUG [RS:0;1a15ecfd95f4:36561 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/oldWALs 2024-11-20T14:46:56,315 INFO [RS:0;1a15ecfd95f4:36561 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C36561%2C1732114014828.meta:.meta(num 1732114015918) 2024-11-20T14:46:56,315 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,315 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,315 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,316 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741833_1009 (size=93) 2024-11-20T14:46:56,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741833_1009 (size=93) 2024-11-20T14:46:56,321 DEBUG [RS:0;1a15ecfd95f4:36561 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/oldWALs 2024-11-20T14:46:56,321 INFO [RS:0;1a15ecfd95f4:36561 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C36561%2C1732114014828:(num 1732114015499) 2024-11-20T14:46:56,321 DEBUG [RS:0;1a15ecfd95f4:36561 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:46:56,321 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:46:56,321 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:46:56,321 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T14:46:56,321 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:46:56,321 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
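After its last region closes, the region server shuts down its two WALs (the .meta WAL and the default WAL), interrupting the FSHLog sync runners and archiving the closed files to oldWALs. Since the surrounding test is TestLogRolling, it is worth noting that a log roll can also be requested directly from a client; a sketch using Admin.rollWALWriter follows, with the ServerName values copied from this log purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Host, port and start code as they appear in the log, for illustration only.
      ServerName rs = ServerName.valueOf("1a15ecfd95f4", 36561, 1732114014828L);
      // Closes the current WAL file and opens a new one; fully flushed old
      // files are later archived to the oldWALs directory.
      admin.rollWALWriter(rs);
    }
  }
}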
2024-11-20T14:46:56,322 INFO [RS:0;1a15ecfd95f4:36561 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36561 2024-11-20T14:46:56,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:46:56,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,36561,1732114014828 2024-11-20T14:46:56,347 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:46:56,356 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,36561,1732114014828] 2024-11-20T14:46:56,364 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,36561,1732114014828 already deleted, retry=false 2024-11-20T14:46:56,364 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,36561,1732114014828 expired; onlineServers=0 2024-11-20T14:46:56,364 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a15ecfd95f4,43497,1732114014673' ***** 2024-11-20T14:46:56,364 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T14:46:56,364 INFO [M:0;1a15ecfd95f4:43497 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:46:56,364 INFO [M:0;1a15ecfd95f4:43497 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:46:56,364 DEBUG [M:0;1a15ecfd95f4:43497 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T14:46:56,365 DEBUG [M:0;1a15ecfd95f4:43497 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T14:46:56,365 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T14:46:56,365 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114015216 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114015216,5,FailOnTimeoutGroup] 2024-11-20T14:46:56,365 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114015216 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114015216,5,FailOnTimeoutGroup] 2024-11-20T14:46:56,365 INFO [M:0;1a15ecfd95f4:43497 {}] hbase.ChoreService(370): Chore service for: master/1a15ecfd95f4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T14:46:56,365 INFO [M:0;1a15ecfd95f4:43497 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:46:56,365 DEBUG [M:0;1a15ecfd95f4:43497 {}] master.HMaster(1795): Stopping service threads 2024-11-20T14:46:56,365 INFO [M:0;1a15ecfd95f4:43497 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T14:46:56,365 INFO [M:0;1a15ecfd95f4:43497 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:46:56,365 INFO [M:0;1a15ecfd95f4:43497 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T14:46:56,366 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T14:46:56,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T14:46:56,373 DEBUG [M:0;1a15ecfd95f4:43497 {}] zookeeper.ZKUtil(347): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T14:46:56,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:56,373 WARN [M:0;1a15ecfd95f4:43497 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T14:46:56,373 INFO [M:0;1a15ecfd95f4:43497 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/.lastflushedseqids 2024-11-20T14:46:56,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741836_1012 (size=108) 2024-11-20T14:46:56,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741836_1012 (size=108) 2024-11-20T14:46:56,383 INFO [M:0;1a15ecfd95f4:43497 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T14:46:56,383 INFO [M:0;1a15ecfd95f4:43497 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T14:46:56,383 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:46:56,383 INFO [M:0;1a15ecfd95f4:43497 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:56,383 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:56,383 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:46:56,383 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:56,383 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T14:46:56,401 DEBUG [M:0;1a15ecfd95f4:43497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d225beed74d144fea24887f420fa9c9b is 82, key is hbase:meta,,1/info:regioninfo/1732114015952/Put/seqid=0 2024-11-20T14:46:56,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741837_1013 (size=5672) 2024-11-20T14:46:56,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741837_1013 (size=5672) 2024-11-20T14:46:56,414 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d225beed74d144fea24887f420fa9c9b 2024-11-20T14:46:56,439 DEBUG [M:0;1a15ecfd95f4:43497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cf0b00c52c6a435da593fcd1202f4772 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732114016029/Put/seqid=0 2024-11-20T14:46:56,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741838_1014 (size=5275) 2024-11-20T14:46:56,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741838_1014 (size=5275) 2024-11-20T14:46:56,449 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cf0b00c52c6a435da593fcd1202f4772 2024-11-20T14:46:56,456 INFO [RS:0;1a15ecfd95f4:36561 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:46:56,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:56,456 INFO [RS:0;1a15ecfd95f4:36561 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,36561,1732114014828; zookeeper connection closed. 
2024-11-20T14:46:56,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36561-0x10159fff4830001, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:56,456 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2dce5085 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2dce5085 2024-11-20T14:46:56,456 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T14:46:56,472 DEBUG [M:0;1a15ecfd95f4:43497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5b42acda8c524cc9b6263461478f447c is 69, key is 1a15ecfd95f4,36561,1732114014828/rs:state/1732114015309/Put/seqid=0 2024-11-20T14:46:56,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741839_1015 (size=5156) 2024-11-20T14:46:56,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741839_1015 (size=5156) 2024-11-20T14:46:56,479 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5b42acda8c524cc9b6263461478f447c 2024-11-20T14:46:56,502 DEBUG [M:0;1a15ecfd95f4:43497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d80edc6d148a4bd49101d97e8e640dbe is 52, key is load_balancer_on/state:d/1732114016082/Put/seqid=0 2024-11-20T14:46:56,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741840_1016 (size=5056) 2024-11-20T14:46:56,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741840_1016 (size=5056) 2024-11-20T14:46:56,509 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d80edc6d148a4bd49101d97e8e640dbe 2024-11-20T14:46:56,517 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d225beed74d144fea24887f420fa9c9b as hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d225beed74d144fea24887f420fa9c9b 2024-11-20T14:46:56,524 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d225beed74d144fea24887f420fa9c9b, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-20T14:46:56,526 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cf0b00c52c6a435da593fcd1202f4772 as hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cf0b00c52c6a435da593fcd1202f4772 2024-11-20T14:46:56,533 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cf0b00c52c6a435da593fcd1202f4772, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T14:46:56,534 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5b42acda8c524cc9b6263461478f447c as hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5b42acda8c524cc9b6263461478f447c 2024-11-20T14:46:56,542 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5b42acda8c524cc9b6263461478f447c, entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T14:46:56,543 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d80edc6d148a4bd49101d97e8e640dbe as hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d80edc6d148a4bd49101d97e8e640dbe 2024-11-20T14:46:56,550 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34345/user/jenkins/test-data/a8d72f90-2129-ceb2-6cb0-58d698a5b9da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d80edc6d148a4bd49101d97e8e640dbe, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T14:46:56,552 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=29, compaction requested=false 2024-11-20T14:46:56,553 INFO [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T14:46:56,554 DEBUG [M:0;1a15ecfd95f4:43497 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114016383Disabling compacts and flushes for region at 1732114016383Disabling writes for close at 1732114016383Obtaining lock to block concurrent updates at 1732114016383Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732114016383Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732114016384 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732114016385 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732114016385Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732114016401 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732114016401Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732114016422 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732114016439 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732114016439Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732114016455 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732114016471 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732114016472 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732114016486 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732114016502 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732114016502Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14bae802: reopening flushed file at 1732114016516 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e2643fc: reopening flushed file at 1732114016524 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ec8e954: reopening flushed file at 1732114016533 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63ea31f1: reopening flushed file at 1732114016542 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=29, compaction requested=false at 1732114016552 (+10 ms)Writing region close event to WAL at 1732114016553 (+1 ms)Closed at 1732114016553 2024-11-20T14:46:56,554 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,554 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,554 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,554 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,554 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:46:56,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43785 is added to blk_1073741830_1006 (size=10311) 2024-11-20T14:46:56,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41613 is added to blk_1073741830_1006 (size=10311) 2024-11-20T14:46:56,558 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:46:56,558 INFO [M:0;1a15ecfd95f4:43497 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T14:46:56,558 INFO [M:0;1a15ecfd95f4:43497 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43497 2024-11-20T14:46:56,558 INFO [M:0;1a15ecfd95f4:43497 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:46:56,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:56,673 INFO [M:0;1a15ecfd95f4:43497 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:46:56,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43497-0x10159fff4830000, quorum=127.0.0.1:62813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:46:56,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e3eae67{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:56,676 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4dcd5df2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:46:56,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:46:56,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57a400a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:46:56,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d1da8a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir/,STOPPED} 2024-11-20T14:46:56,679 WARN [BP-343060298-172.17.0.2-1732114012681 heartbeating to localhost/127.0.0.1:34345 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:46:56,679 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:46:56,679 WARN [BP-343060298-172.17.0.2-1732114012681 heartbeating to localhost/127.0.0.1:34345 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-343060298-172.17.0.2-1732114012681 (Datanode Uuid 82fec9c8-8860-43ed-b9cb-a627f4df0822) service to localhost/127.0.0.1:34345 2024-11-20T14:46:56,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:46:56,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data3/current/BP-343060298-172.17.0.2-1732114012681 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:56,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data4/current/BP-343060298-172.17.0.2-1732114012681 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:56,680 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:46:56,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73f425bc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:56,684 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72881875{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:46:56,684 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:46:56,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232f5aee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:46:56,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42736c41{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir/,STOPPED} 2024-11-20T14:46:56,685 WARN [BP-343060298-172.17.0.2-1732114012681 heartbeating to localhost/127.0.0.1:34345 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:46:56,685 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:46:56,685 WARN [BP-343060298-172.17.0.2-1732114012681 heartbeating to localhost/127.0.0.1:34345 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-343060298-172.17.0.2-1732114012681 (Datanode Uuid d05ee728-e7cd-4549-ae65-5a14b2b7ec62) service to localhost/127.0.0.1:34345 2024-11-20T14:46:56,685 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:46:56,686 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data1/current/BP-343060298-172.17.0.2-1732114012681 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:56,686 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/cluster_a9ae1166-ee72-e552-185f-95765b4a22fb/data/data2/current/BP-343060298-172.17.0.2-1732114012681 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:46:56,686 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:46:56,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@13750752{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:46:56,692 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a90f125{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:46:56,692 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:46:56,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@246aaef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:46:56,693 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65ee0588{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir/,STOPPED} 2024-11-20T14:46:56,699 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T14:46:56,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T14:46:56,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T14:46:56,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.log.dir so I do NOT create it in target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e 2024-11-20T14:46:56,720 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a6db285f-e30a-a07a-5023-9477aa1e0727/hadoop.tmp.dir so I do NOT create it in target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e 2024-11-20T14:46:56,720 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8, deleteOnExit=true 2024-11-20T14:46:56,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/test.cache.data in system properties and HBase conf 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir in system properties and HBase conf 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T14:46:56,721 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:46:56,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/nfs.dump.dir in system properties and HBase conf 2024-11-20T14:46:56,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir in system properties and HBase conf 2024-11-20T14:46:56,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:46:56,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T14:46:56,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T14:46:56,738 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:46:56,796 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:46:56,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:46:56,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:46:56,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:46:56,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:46:56,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:56,996 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:46:56,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:46:56,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:46:56,997 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:46:56,998 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:56,999 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dda769c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:46:56,999 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c22ffb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:46:57,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24e73295{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir/jetty-localhost-46849-hadoop-hdfs-3_4_1-tests_jar-_-any-3205006890142511974/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:46:57,114 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@226d09c5{HTTP/1.1, (http/1.1)}{localhost:46849} 2024-11-20T14:46:57,115 INFO [Time-limited test {}] server.Server(415): Started @102314ms 2024-11-20T14:46:57,129 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:46:57,340 INFO [regionserver/1a15ecfd95f4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:46:57,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:57,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:46:57,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:46:57,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:46:57,368 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:46:57,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e068543{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:46:57,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2be5a5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:46:57,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74bd1c10{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir/jetty-localhost-45117-hadoop-hdfs-3_4_1-tests_jar-_-any-11025293898811121096/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:57,477 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66f62573{HTTP/1.1, (http/1.1)}{localhost:45117} 2024-11-20T14:46:57,477 INFO [Time-limited test {}] server.Server(415): Started @102676ms 2024-11-20T14:46:57,479 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:46:57,517 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:46:57,522 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:46:57,523 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:46:57,523 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:46:57,523 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:46:57,524 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32429ee4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:46:57,524 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c42ea91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:46:57,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69bb1e39{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir/jetty-localhost-38475-hadoop-hdfs-3_4_1-tests_jar-_-any-10319085536741236480/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:46:57,636 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23bd1c1e{HTTP/1.1, (http/1.1)}{localhost:38475} 2024-11-20T14:46:57,636 INFO [Time-limited test {}] server.Server(415): Started @102835ms 2024-11-20T14:46:57,638 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:46:58,162 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data1/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:58,163 WARN [Thread-667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data2/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:58,186 WARN [Thread-630 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:46:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73b8f7bdfe1ca890 with lease ID 0x19b3ebfb16add78c: Processing first storage report for DS-92885184-7247-45fe-abdb-2be01c84b7e4 from datanode DatanodeRegistration(127.0.0.1:38809, datanodeUuid=fabd0784-db53-45e2-a49a-b995a1a822a6, infoPort=43855, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:46:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73b8f7bdfe1ca890 with lease ID 0x19b3ebfb16add78c: from storage DS-92885184-7247-45fe-abdb-2be01c84b7e4 node DatanodeRegistration(127.0.0.1:38809, datanodeUuid=fabd0784-db53-45e2-a49a-b995a1a822a6, infoPort=43855, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T14:46:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73b8f7bdfe1ca890 with lease ID 0x19b3ebfb16add78c: Processing first storage report for DS-8c446c5c-041e-4903-ab72-db112a8a1a16 from datanode DatanodeRegistration(127.0.0.1:38809, datanodeUuid=fabd0784-db53-45e2-a49a-b995a1a822a6, infoPort=43855, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:46:58,195 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73b8f7bdfe1ca890 with lease ID 0x19b3ebfb16add78c: from storage DS-8c446c5c-041e-4903-ab72-db112a8a1a16 node DatanodeRegistration(127.0.0.1:38809, datanodeUuid=fabd0784-db53-45e2-a49a-b995a1a822a6, infoPort=43855, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:46:58,337 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data3/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:58,337 WARN [Thread-678 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data4/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:46:58,355 WARN [Thread-653 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:46:58,357 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x711a55503ec9e79b with lease ID 0x19b3ebfb16add78d: Processing first storage report for DS-d454c320-7338-4438-87b7-611a2cf8c846 from datanode DatanodeRegistration(127.0.0.1:34309, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=41029, infoSecurePort=0, ipcPort=46141, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:46:58,358 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x711a55503ec9e79b with lease ID 0x19b3ebfb16add78d: from storage DS-d454c320-7338-4438-87b7-611a2cf8c846 node DatanodeRegistration(127.0.0.1:34309, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=41029, infoSecurePort=0, ipcPort=46141, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:46:58,358 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x711a55503ec9e79b with lease ID 0x19b3ebfb16add78d: Processing first storage report for DS-d7ea8a13-d8d9-498f-a5d0-a95cdb64bd31 from datanode DatanodeRegistration(127.0.0.1:34309, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=41029, infoSecurePort=0, ipcPort=46141, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:46:58,358 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x711a55503ec9e79b with lease ID 0x19b3ebfb16add78d: from storage DS-d7ea8a13-d8d9-498f-a5d0-a95cdb64bd31 node DatanodeRegistration(127.0.0.1:34309, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=41029, infoSecurePort=0, ipcPort=46141, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:46:58,389 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e 2024-11-20T14:46:58,394 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/zookeeper_0, clientPort=52514, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T14:46:58,395 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52514 2024-11-20T14:46:58,396 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:58,398 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:58,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:46:58,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:46:58,409 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3 with version=8 2024-11-20T14:46:58,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase-staging 2024-11-20T14:46:58,411 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:46:58,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:58,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:58,411 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:46:58,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:58,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:46:58,412 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T14:46:58,412 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:46:58,412 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33939 2024-11-20T14:46:58,414 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33939 connecting to ZooKeeper ensemble=127.0.0.1:52514 2024-11-20T14:46:58,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339390x0, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:46:58,470 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33939-0x1015a00031e0000 connected 2024-11-20T14:46:58,531 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:58,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:58,536 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:58,536 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3, hbase.cluster.distributed=false 2024-11-20T14:46:58,538 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:46:58,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33939 2024-11-20T14:46:58,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33939 2024-11-20T14:46:58,542 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33939 2024-11-20T14:46:58,543 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33939 2024-11-20T14:46:58,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33939 2024-11-20T14:46:58,559 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:46:58,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:58,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:58,560 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:46:58,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:46:58,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:46:58,560 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:46:58,560 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:46:58,561 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45919 2024-11-20T14:46:58,563 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45919 connecting to ZooKeeper ensemble=127.0.0.1:52514 2024-11-20T14:46:58,563 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:58,566 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:58,580 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459190x0, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:46:58,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459190x0, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:46:58,581 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:46:58,583 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45919-0x1015a00031e0001 connected 2024-11-20T14:46:58,583 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:46:58,584 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T14:46:58,585 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:46:58,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45919 2024-11-20T14:46:58,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45919 2024-11-20T14:46:58,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45919 2024-11-20T14:46:58,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45919 2024-11-20T14:46:58,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45919 2024-11-20T14:46:58,612 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a15ecfd95f4:33939 2024-11-20T14:46:58,615 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a15ecfd95f4,33939,1732114018411 2024-11-20T14:46:58,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:58,622 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:58,623 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/1a15ecfd95f4,33939,1732114018411 2024-11-20T14:46:58,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,630 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T14:46:58,630 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,631 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:46:58,631 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a15ecfd95f4,33939,1732114018411 from backup master directory 2024-11-20T14:46:58,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a15ecfd95f4,33939,1732114018411 2024-11-20T14:46:58,639 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:46:58,639 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:58,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:46:58,639 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a15ecfd95f4,33939,1732114018411 2024-11-20T14:46:58,643 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/hbase.id] with ID: 78f06c82-8a64-4de6-90f6-058630f4549d 2024-11-20T14:46:58,643 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/.tmp/hbase.id 2024-11-20T14:46:58,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:46:58,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:46:58,657 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location 
[hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/.tmp/hbase.id]:[hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/hbase.id] 2024-11-20T14:46:58,671 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:46:58,671 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T14:46:58,673 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-11-20T14:46:58,680 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:46:58,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:46:58,689 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:46:58,690 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T14:46:58,690 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:46:58,702 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:46:58,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:46:58,704 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store 2024-11-20T14:46:58,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:46:58,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:46:58,712 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:58,712 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:46:58,712 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:58,712 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:58,712 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-20T14:46:58,712 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:58,712 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:46:58,712 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114018712Disabling compacts and flushes for region at 1732114018712Disabling writes for close at 1732114018712Writing region close event to WAL at 1732114018712Closed at 1732114018712 2024-11-20T14:46:58,713 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/.initializing 2024-11-20T14:46:58,713 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411 2024-11-20T14:46:58,717 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C33939%2C1732114018411, suffix=, logDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/oldWALs, maxLogs=10 2024-11-20T14:46:58,718 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 2024-11-20T14:46:58,723 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 2024-11-20T14:46:58,724 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41029:41029),(127.0.0.1/127.0.0.1:43855:43855)] 2024-11-20T14:46:58,725 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:46:58,725 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:58,725 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,725 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T14:46:58,733 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:58,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:58,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T14:46:58,735 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:58,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:46:58,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T14:46:58,738 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:58,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:46:58,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T14:46:58,741 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:58,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:46:58,742 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,742 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,743 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,745 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,745 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,745 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T14:46:58,747 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:46:58,751 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:46:58,752 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767913, jitterRate=-0.023548364639282227}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T14:46:58,753 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732114018726Initializing all the Stores at 1732114018727 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114018727Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114018730 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114018730Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114018730Cleaning up temporary data from old regions at 1732114018745 (+15 ms)Region opened successfully at 1732114018753 (+8 ms) 2024-11-20T14:46:58,754 INFO [master/1a15ecfd95f4:0:becomeActiveMaster 
{}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T14:46:58,760 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46339417, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:46:58,761 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T14:46:58,761 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T14:46:58,761 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T14:46:58,762 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T14:46:58,762 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T14:46:58,763 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T14:46:58,763 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T14:46:58,765 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
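[Editor's note, not part of the captured log: the entries above are the typical output of org.apache.hadoop.hbase.HBaseTestingUtil bringing up an in-process HDFS, ZooKeeper, HMaster and RegionServer stack for a test. The following is a minimal Java sketch of how such a mini cluster is normally driven, assuming HBaseTestingUtil keeps the startMiniCluster()/createTable()/shutdownMiniCluster() methods of the long-standing HBaseTestingUtility API; the table name "demo" and column family "cf" are invented for the example and do not appear in this run.]

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();                         // brings up mini DFS + mini ZK + master + region server, as logged above
    try {
      // "demo" and "cf" are hypothetical names used only for this sketch
      Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("cf"));
      table.put(new Put(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      table.close();
    } finally {
      util.shutdownMiniCluster();                    // tears the whole stack back down at the end of the test
    }
  }
}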
2024-11-20T14:46:58,766 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T14:46:58,787 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T14:46:58,787 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T14:46:58,788 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T14:46:58,797 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T14:46:58,797 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T14:46:58,798 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T14:46:58,805 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T14:46:58,806 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T14:46:58,813 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T14:46:58,817 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T14:46:58,822 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T14:46:58,830 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:58,830 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:46:58,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-20T14:46:58,832 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a15ecfd95f4,33939,1732114018411, sessionid=0x1015a00031e0000, setting cluster-up flag (Was=false) 2024-11-20T14:46:58,847 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,872 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T14:46:58,874 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,33939,1732114018411 2024-11-20T14:46:58,897 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:58,922 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T14:46:58,924 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,33939,1732114018411 2024-11-20T14:46:58,931 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T14:46:58,936 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:58,937 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T14:46:58,937 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-20T14:46:58,937 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a15ecfd95f4,33939,1732114018411 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T14:46:58,939 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:58,939 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:58,939 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:58,939 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:46:58,939 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a15ecfd95f4:0, corePoolSize=10, maxPoolSize=10 2024-11-20T14:46:58,939 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:58,939 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:46:58,940 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:58,948 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:58,949 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T14:46:58,950 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:58,950 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T14:46:58,962 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732114048962 2024-11-20T14:46:58,963 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T14:46:58,963 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T14:46:58,963 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T14:46:58,963 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T14:46:58,963 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T14:46:58,963 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T14:46:58,966 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:58,969 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T14:46:58,969 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T14:46:58,969 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T14:46:58,969 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T14:46:58,969 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T14:46:58,972 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114018970,5,FailOnTimeoutGroup] 2024-11-20T14:46:58,972 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114018972,5,FailOnTimeoutGroup] 2024-11-20T14:46:58,972 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:58,972 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T14:46:58,972 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:58,972 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:58,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:46:58,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:46:58,975 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T14:46:58,975 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3 2024-11-20T14:46:58,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:46:58,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:46:58,996 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(746): ClusterId : 78f06c82-8a64-4de6-90f6-058630f4549d 2024-11-20T14:46:58,996 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:46:59,023 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:46:59,023 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:46:59,031 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:46:59,032 DEBUG [RS:0;1a15ecfd95f4:45919 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753efdb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:46:59,049 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a15ecfd95f4:45919 2024-11-20T14:46:59,049 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:46:59,049 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:46:59,049 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T14:46:59,050 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,33939,1732114018411 with port=45919, startcode=1732114018559 2024-11-20T14:46:59,050 DEBUG [RS:0;1a15ecfd95f4:45919 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:46:59,053 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51639, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:46:59,053 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33939 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,053 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33939 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,056 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3 2024-11-20T14:46:59,056 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44451 2024-11-20T14:46:59,056 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:46:59,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:46:59,064 DEBUG [RS:0;1a15ecfd95f4:45919 {}] zookeeper.ZKUtil(111): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,064 WARN [RS:0;1a15ecfd95f4:45919 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-20T14:46:59,064 INFO [RS:0;1a15ecfd95f4:45919 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:46:59,065 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,065 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,45919,1732114018559] 2024-11-20T14:46:59,068 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:46:59,074 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:46:59,075 INFO [RS:0;1a15ecfd95f4:45919 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:46:59,075 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,076 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:46:59,077 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:46:59,077 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,077 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,077 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,077 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,077 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,077 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,077 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:46:59,078 DEBUG [RS:0;1a15ecfd95f4:45919 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:46:59,079 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,079 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,079 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,079 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,079 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,079 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,45919,1732114018559-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:46:59,094 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:46:59,094 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,45919,1732114018559-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,094 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,094 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.Replication(171): 1a15ecfd95f4,45919,1732114018559 started 2024-11-20T14:46:59,110 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T14:46:59,110 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,45919,1732114018559, RpcServer on 1a15ecfd95f4/172.17.0.2:45919, sessionid=0x1015a00031e0001 2024-11-20T14:46:59,110 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:46:59,111 DEBUG [RS:0;1a15ecfd95f4:45919 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,111 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,45919,1732114018559' 2024-11-20T14:46:59,111 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:46:59,112 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:46:59,112 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:46:59,112 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:46:59,112 DEBUG [RS:0;1a15ecfd95f4:45919 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,112 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,45919,1732114018559' 2024-11-20T14:46:59,112 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:46:59,113 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:46:59,113 DEBUG [RS:0;1a15ecfd95f4:45919 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:46:59,113 INFO [RS:0;1a15ecfd95f4:45919 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:46:59,113 INFO [RS:0;1a15ecfd95f4:45919 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-20T14:46:59,216 INFO [RS:0;1a15ecfd95f4:45919 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C45919%2C1732114018559, suffix=, logDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs, maxLogs=32 2024-11-20T14:46:59,217 INFO [RS:0;1a15ecfd95f4:45919 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 2024-11-20T14:46:59,224 INFO [RS:0;1a15ecfd95f4:45919 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 2024-11-20T14:46:59,231 DEBUG [RS:0;1a15ecfd95f4:45919 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43855:43855),(127.0.0.1/127.0.0.1:41029:41029)] 2024-11-20T14:46:59,394 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:59,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:46:59,399 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:46:59,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:46:59,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:46:59,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:46:59,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:46:59,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:46:59,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:46:59,408 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,409 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:46:59,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740 2024-11-20T14:46:59,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740 2024-11-20T14:46:59,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:46:59,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:46:59,412 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T14:46:59,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:46:59,416 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:46:59,416 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815019, jitterRate=0.036350905895233154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:46:59,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732114019394Initializing all the Stores at 1732114019396 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114019396Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114019396Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114019396Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114019396Cleaning up temporary data from old regions at 1732114019412 (+16 ms)Region opened successfully at 1732114019417 (+5 ms) 2024-11-20T14:46:59,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:46:59,418 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:46:59,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:46:59,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:46:59,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:46:59,418 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:46:59,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114019417Disabling compacts and flushes for region at 1732114019417Disabling writes for close at 1732114019418 (+1 ms)Writing region close event to WAL at 1732114019418Closed at 1732114019418 2024-11-20T14:46:59,420 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:59,420 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T14:46:59,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T14:46:59,422 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:46:59,423 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T14:46:59,573 DEBUG [1a15ecfd95f4:33939 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T14:46:59,574 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,576 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,45919,1732114018559, state=OPENING 2024-11-20T14:46:59,630 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T14:46:59,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-20T14:46:59,638 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:46:59,640 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:46:59,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:59,640 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:59,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,45919,1732114018559}] 2024-11-20T14:46:59,795 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T14:46:59,798 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33391, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T14:46:59,804 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T14:46:59,804 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:46:59,806 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C45919%2C1732114018559.meta, suffix=.meta, logDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs, maxLogs=32 2024-11-20T14:46:59,807 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta 2024-11-20T14:46:59,813 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta 2024-11-20T14:46:59,814 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41029:41029),(127.0.0.1/127.0.0.1:43855:43855)] 2024-11-20T14:46:59,815 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:46:59,816 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T14:46:59,816 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T14:46:59,816 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T14:46:59,816 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T14:46:59,816 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:46:59,816 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T14:46:59,816 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T14:46:59,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:46:59,819 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:46:59,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:46:59,821 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:46:59,821 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:46:59,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:46:59,823 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:46:59,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:46:59,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:46:59,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:46:59,825 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:46:59,826 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740 2024-11-20T14:46:59,827 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740 2024-11-20T14:46:59,828 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:46:59,828 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:46:59,829 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T14:46:59,830 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:46:59,831 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694796, jitterRate=-0.11652135848999023}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:46:59,831 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T14:46:59,831 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732114019816Writing region info on filesystem at 1732114019816Initializing all the Stores at 1732114019817 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114019817Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114019818 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114019818Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114019818Cleaning up temporary data from old regions at 1732114019828 (+10 ms)Running coprocessor post-open hooks at 1732114019831 (+3 ms)Region opened successfully at 1732114019831 2024-11-20T14:46:59,832 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732114019795 2024-11-20T14:46:59,836 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T14:46:59,836 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T14:46:59,837 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,838 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,45919,1732114018559, state=OPEN 2024-11-20T14:46:59,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:46:59,878 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:46:59,879 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,45919,1732114018559 2024-11-20T14:46:59,879 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:59,879 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:46:59,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T14:46:59,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,45919,1732114018559 in 239 msec 2024-11-20T14:46:59,886 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T14:46:59,886 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 462 msec 2024-11-20T14:46:59,887 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:46:59,887 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T14:46:59,889 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:46:59,889 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,45919,1732114018559, seqNum=-1] 2024-11-20T14:46:59,889 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:46:59,891 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48547, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:46:59,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 961 msec 2024-11-20T14:46:59,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732114019898, completionTime=-1 2024-11-20T14:46:59,898 INFO 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T14:46:59,898 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-20T14:46:59,901 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-20T14:46:59,901 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732114079901 2024-11-20T14:46:59,901 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732114139901 2024-11-20T14:46:59,901 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-20T14:46:59,901 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,33939,1732114018411-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,901 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,33939,1732114018411-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,902 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,33939,1732114018411-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,902 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a15ecfd95f4:33939, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,902 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,902 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,904 DEBUG [master/1a15ecfd95f4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T14:46:59,906 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.267sec 2024-11-20T14:46:59,906 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T14:46:59,907 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T14:46:59,907 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T14:46:59,907 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T14:46:59,907 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T14:46:59,907 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,33939,1732114018411-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:46:59,907 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,33939,1732114018411-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T14:46:59,909 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T14:46:59,909 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T14:46:59,909 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,33939,1732114018411-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:46:59,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d1792a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:46:59,997 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a15ecfd95f4,33939,-1 for getting cluster id 2024-11-20T14:46:59,997 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T14:47:00,019 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '78f06c82-8a64-4de6-90f6-058630f4549d' 2024-11-20T14:47:00,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T14:47:00,020 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "78f06c82-8a64-4de6-90f6-058630f4549d" 2024-11-20T14:47:00,020 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7990b957, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:47:00,020 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a15ecfd95f4,33939,-1] 2024-11-20T14:47:00,020 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T14:47:00,021 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:00,022 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56226, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T14:47:00,023 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4766d278, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:47:00,024 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:47:00,025 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,45919,1732114018559, seqNum=-1] 2024-11-20T14:47:00,025 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:47:00,027 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58150, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:47:00,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a15ecfd95f4,33939,1732114018411 2024-11-20T14:47:00,029 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:00,032 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T14:47:00,050 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:47:00,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:00,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:00,050 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:47:00,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:00,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:47:00,050 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:47:00,051 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:47:00,051 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34813 2024-11-20T14:47:00,053 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34813 connecting to ZooKeeper ensemble=127.0.0.1:52514 2024-11-20T14:47:00,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:00,055 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:00,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348130x0, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:47:00,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:348130x0, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-20T14:47:00,081 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-20T14:47:00,081 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34813-0x1015a00031e0002 connected 2024-11-20T14:47:00,082 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:47:00,082 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:47:00,083 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:47:00,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:47:00,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34813 2024-11-20T14:47:00,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34813 2024-11-20T14:47:00,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34813 2024-11-20T14:47:00,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34813 2024-11-20T14:47:00,086 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34813 2024-11-20T14:47:00,092 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(746): ClusterId : 78f06c82-8a64-4de6-90f6-058630f4549d 2024-11-20T14:47:00,092 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:47:00,104 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:47:00,104 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:47:00,114 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:47:00,115 DEBUG [RS:1;1a15ecfd95f4:34813 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d8087b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:47:00,129 DEBUG [RS:1;1a15ecfd95f4:34813 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: 
Shutdownhook:RS:1;1a15ecfd95f4:34813 2024-11-20T14:47:00,130 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:47:00,130 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:47:00,130 DEBUG [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-20T14:47:00,131 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,33939,1732114018411 with port=34813, startcode=1732114020050 2024-11-20T14:47:00,131 DEBUG [RS:1;1a15ecfd95f4:34813 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:47:00,132 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38403, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:47:00,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33939 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:00,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33939 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:00,135 DEBUG [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3 2024-11-20T14:47:00,135 DEBUG [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44451 2024-11-20T14:47:00,135 DEBUG [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:47:00,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:47:00,145 DEBUG [RS:1;1a15ecfd95f4:34813 {}] zookeeper.ZKUtil(111): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:00,145 WARN [RS:1;1a15ecfd95f4:34813 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-20T14:47:00,145 INFO [RS:1;1a15ecfd95f4:34813 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:47:00,145 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,34813,1732114020050] 2024-11-20T14:47:00,146 DEBUG [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:00,149 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:47:00,151 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:47:00,152 INFO [RS:1;1a15ecfd95f4:34813 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:47:00,152 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,152 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:47:00,153 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:47:00,153 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,153 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,153 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:47:00,154 DEBUG [RS:1;1a15ecfd95f4:34813 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:47:00,167 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,167 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,167 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,167 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,167 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,167 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,34813,1732114020050-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:47:00,182 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:47:00,183 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,34813,1732114020050-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,183 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:00,183 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.Replication(171): 1a15ecfd95f4,34813,1732114020050 started 2024-11-20T14:47:00,198 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T14:47:00,199 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,34813,1732114020050, RpcServer on 1a15ecfd95f4/172.17.0.2:34813, sessionid=0x1015a00031e0002 2024-11-20T14:47:00,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;1a15ecfd95f4:34813,5,FailOnTimeoutGroup] 2024-11-20T14:47:00,199 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:47:00,199 DEBUG [RS:1;1a15ecfd95f4:34813 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:00,199 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,34813,1732114020050' 2024-11-20T14:47:00,199 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:47:00,199 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-20T14:47:00,199 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T14:47:00,200 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:47:00,201 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a15ecfd95f4,33939,1732114018411 2024-11-20T14:47:00,201 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@17865755 2024-11-20T14:47:00,201 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T14:47:00,203 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:47:00,203 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:47:00,203 DEBUG [RS:1;1a15ecfd95f4:34813 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:00,203 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,34813,1732114020050' 2024-11-20T14:47:00,203 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:47:00,203 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T14:47:00,203 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:47:00,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33939 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 
2024-11-20T14:47:00,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33939 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-20T14:47:00,204 DEBUG [RS:1;1a15ecfd95f4:34813 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:47:00,204 INFO [RS:1;1a15ecfd95f4:34813 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:47:00,204 INFO [RS:1;1a15ecfd95f4:34813 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T14:47:00,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33939 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:47:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33939 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T14:47:00,207 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T14:47:00,207 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:00,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33939 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-20T14:47:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33939 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:47:00,209 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T14:47:00,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741835_1011 (size=393) 2024-11-20T14:47:00,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741835_1011 (size=393) 2024-11-20T14:47:00,218 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9067098e18e52091553316b207831dfb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3 2024-11-20T14:47:00,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34309 is added to blk_1073741836_1012 (size=76) 2024-11-20T14:47:00,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38809 is added to blk_1073741836_1012 (size=76) 2024-11-20T14:47:00,237 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:00,237 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 9067098e18e52091553316b207831dfb, disabling compactions & flushes 2024-11-20T14:47:00,237 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:00,237 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:00,237 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. after waiting 0 ms 2024-11-20T14:47:00,237 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:00,237 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 
2024-11-20T14:47:00,237 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9067098e18e52091553316b207831dfb: Waiting for close lock at 1732114020237Disabling compacts and flushes for region at 1732114020237Disabling writes for close at 1732114020237Writing region close event to WAL at 1732114020237Closed at 1732114020237 2024-11-20T14:47:00,239 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T14:47:00,239 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732114020239"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732114020239"}]},"ts":"1732114020239"} 2024-11-20T14:47:00,242 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-20T14:47:00,243 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T14:47:00,244 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114020243"}]},"ts":"1732114020243"} 2024-11-20T14:47:00,246 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-20T14:47:00,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9067098e18e52091553316b207831dfb, ASSIGN}] 2024-11-20T14:47:00,248 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9067098e18e52091553316b207831dfb, ASSIGN 2024-11-20T14:47:00,249 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9067098e18e52091553316b207831dfb, ASSIGN; state=OFFLINE, location=1a15ecfd95f4,45919,1732114018559; forceNewPlan=false, retain=false 2024-11-20T14:47:00,306 INFO [RS:1;1a15ecfd95f4:34813 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C34813%2C1732114020050, suffix=, logDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050, archiveDir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs, maxLogs=32 2024-11-20T14:47:00,311 INFO [RS:1;1a15ecfd95f4:34813 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 2024-11-20T14:47:00,324 INFO [RS:1;1a15ecfd95f4:34813 {}] wal.AbstractFSWAL(991): New 
WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 2024-11-20T14:47:00,324 DEBUG [RS:1;1a15ecfd95f4:34813 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43855:43855),(127.0.0.1/127.0.0.1:41029:41029)] 2024-11-20T14:47:00,400 INFO [1a15ecfd95f4:33939 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-20T14:47:00,401 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9067098e18e52091553316b207831dfb, regionState=OPENING, regionLocation=1a15ecfd95f4,45919,1732114018559 2024-11-20T14:47:00,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9067098e18e52091553316b207831dfb, ASSIGN because future has completed 2024-11-20T14:47:00,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9067098e18e52091553316b207831dfb, server=1a15ecfd95f4,45919,1732114018559}] 2024-11-20T14:47:00,565 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:00,566 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9067098e18e52091553316b207831dfb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:47:00,567 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,567 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:00,567 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,567 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,569 INFO [StoreOpener-9067098e18e52091553316b207831dfb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,571 INFO [StoreOpener-9067098e18e52091553316b207831dfb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9067098e18e52091553316b207831dfb columnFamilyName info 2024-11-20T14:47:00,571 DEBUG [StoreOpener-9067098e18e52091553316b207831dfb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:00,572 INFO [StoreOpener-9067098e18e52091553316b207831dfb-1 {}] regionserver.HStore(327): Store=9067098e18e52091553316b207831dfb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:47:00,572 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,573 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,573 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,574 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,574 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,576 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,579 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:47:00,579 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9067098e18e52091553316b207831dfb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770177, jitterRate=-0.020670086145401}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T14:47:00,580 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running 
coprocessor post-open hooks for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:00,580 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9067098e18e52091553316b207831dfb: Running coprocessor pre-open hook at 1732114020567Writing region info on filesystem at 1732114020567Initializing all the Stores at 1732114020568 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114020568Cleaning up temporary data from old regions at 1732114020574 (+6 ms)Running coprocessor post-open hooks at 1732114020580 (+6 ms)Region opened successfully at 1732114020580 2024-11-20T14:47:00,581 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb., pid=6, masterSystemTime=1732114020561 2024-11-20T14:47:00,584 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:00,584 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 
2024-11-20T14:47:00,585 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9067098e18e52091553316b207831dfb, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,45919,1732114018559 2024-11-20T14:47:00,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9067098e18e52091553316b207831dfb, server=1a15ecfd95f4,45919,1732114018559 because future has completed 2024-11-20T14:47:00,593 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T14:47:00,593 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9067098e18e52091553316b207831dfb, server=1a15ecfd95f4,45919,1732114018559 in 184 msec 2024-11-20T14:47:00,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T14:47:00,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=9067098e18e52091553316b207831dfb, ASSIGN in 347 msec 2024-11-20T14:47:00,598 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T14:47:00,598 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T14:47:00,598 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-20T14:47:00,598 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114020598"}]},"ts":"1732114020598"} 2024-11-20T14:47:00,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-20T14:47:00,601 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-20T14:47:00,603 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T14:47:00,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 399 msec 2024-11-20T14:47:05,319 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:47:05,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:05,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:05,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:05,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:05,355 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-20T14:47:10,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33939 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:47:10,245 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-20T14:47:10,245 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-20T14:47:10,249 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T14:47:10,249 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:10,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:10,272 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:47:10,272 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:47:10,272 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:47:10,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:47:10,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@142b4537{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:47:10,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b135604{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:47:10,382 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@560cf715{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir/jetty-localhost-38629-hadoop-hdfs-3_4_1-tests_jar-_-any-2575979913006204139/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:10,382 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d0c94cc{HTTP/1.1, (http/1.1)}{localhost:38629} 2024-11-20T14:47:10,382 INFO [Time-limited test {}] server.Server(415): Started @115582ms 2024-11-20T14:47:10,384 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:47:10,424 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:10,429 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:47:10,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:47:10,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:47:10,430 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:47:10,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2afb6183{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:47:10,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d5f200{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:47:10,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@652b5a51{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir/jetty-localhost-38241-hadoop-hdfs-3_4_1-tests_jar-_-any-2578737925693872466/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:10,535 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1eb193b9{HTTP/1.1, (http/1.1)}{localhost:38241} 2024-11-20T14:47:10,535 INFO [Time-limited test {}] server.Server(415): Started @115734ms 2024-11-20T14:47:10,536 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:47:10,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:10,597 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:47:10,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:47:10,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:47:10,598 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:47:10,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c4b247{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:47:10,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10fdb31b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:47:10,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@89d91f0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir/jetty-localhost-46299-hadoop-hdfs-3_4_1-tests_jar-_-any-6183545981730247895/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:10,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27fa50f0{HTTP/1.1, (http/1.1)}{localhost:46299} 2024-11-20T14:47:10,705 INFO [Time-limited test {}] server.Server(415): Started @115905ms 2024-11-20T14:47:10,707 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:47:11,374 WARN [Thread-860 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:11,374 WARN [Thread-861 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:11,393 WARN [Thread-802 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:47:11,397 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5bc18f965c8e1eb7 with lease ID 0x19b3ebfb16add78e: Processing first storage report for DS-59ef332b-58fa-4e54-ad52-bed331d4887e from datanode DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:47:11,397 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5bc18f965c8e1eb7 with lease ID 0x19b3ebfb16add78e: from storage DS-59ef332b-58fa-4e54-ad52-bed331d4887e node DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:11,397 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5bc18f965c8e1eb7 with lease ID 0x19b3ebfb16add78e: Processing first storage report for DS-0764ca13-a16e-476b-bbfb-15f9cbcbfae1 from datanode DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:47:11,397 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5bc18f965c8e1eb7 with lease ID 0x19b3ebfb16add78e: from storage DS-0764ca13-a16e-476b-bbfb-15f9cbcbfae1 node DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:11,646 WARN [Thread-872 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data7/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:11,646 WARN [Thread-873 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data8/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:11,664 WARN [Thread-824 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:47:11,666 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9608f2f65a9b0a6a with lease ID 0x19b3ebfb16add78f: Processing first storage report for DS-c3738f09-b982-4b4c-8d50-5d732a4c3000 from datanode DatanodeRegistration(127.0.0.1:44283, datanodeUuid=c8f1b05e-caea-444f-98d9-524884a318d7, infoPort=44575, infoSecurePort=0, ipcPort=33143, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:47:11,667 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9608f2f65a9b0a6a with lease ID 0x19b3ebfb16add78f: from storage DS-c3738f09-b982-4b4c-8d50-5d732a4c3000 node DatanodeRegistration(127.0.0.1:44283, datanodeUuid=c8f1b05e-caea-444f-98d9-524884a318d7, infoPort=44575, infoSecurePort=0, ipcPort=33143, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:11,667 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9608f2f65a9b0a6a with lease ID 0x19b3ebfb16add78f: Processing first storage report for DS-906e596b-25e3-4b0e-9e51-1deb86f840ee from datanode DatanodeRegistration(127.0.0.1:44283, datanodeUuid=c8f1b05e-caea-444f-98d9-524884a318d7, infoPort=44575, infoSecurePort=0, ipcPort=33143, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:47:11,667 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9608f2f65a9b0a6a with lease ID 0x19b3ebfb16add78f: from storage DS-906e596b-25e3-4b0e-9e51-1deb86f840ee node DatanodeRegistration(127.0.0.1:44283, datanodeUuid=c8f1b05e-caea-444f-98d9-524884a318d7, infoPort=44575, infoSecurePort=0, ipcPort=33143, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:11,768 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data9/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:11,769 WARN [Thread-884 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data10/current/BP-1562597703-172.17.0.2-1732114016751/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:11,789 WARN [Thread-846 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:47:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x433858e4f74eaaea with lease ID 0x19b3ebfb16add790: Processing first storage report for DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b from datanode DatanodeRegistration(127.0.0.1:42201, datanodeUuid=c5d518b5-3d41-40e8-9692-c84db295b8c1, infoPort=41921, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:47:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x433858e4f74eaaea with lease ID 0x19b3ebfb16add790: from storage DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b node DatanodeRegistration(127.0.0.1:42201, datanodeUuid=c5d518b5-3d41-40e8-9692-c84db295b8c1, infoPort=41921, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x433858e4f74eaaea with lease ID 0x19b3ebfb16add790: Processing first storage report for DS-e2f86260-af2e-48de-97ed-0f9928439d1a from datanode DatanodeRegistration(127.0.0.1:42201, datanodeUuid=c5d518b5-3d41-40e8-9692-c84db295b8c1, infoPort=41921, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751) 2024-11-20T14:47:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x433858e4f74eaaea with lease ID 0x19b3ebfb16add790: from storage DS-e2f86260-af2e-48de-97ed-0f9928439d1a node DatanodeRegistration(127.0.0.1:42201, datanodeUuid=c5d518b5-3d41-40e8-9692-c84db295b8c1, infoPort=41921, infoSecurePort=0, ipcPort=42161, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:11,844 WARN [ResponseProcessor for block BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,844 WARN [ResponseProcessor for block BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:11,844 WARN [ResponseProcessor for block BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,844 WARN [ResponseProcessor for block BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,845 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:11,845 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:11,845 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta block BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 
2024-11-20T14:47:11,845 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:11,845 WARN [PacketResponder: BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34309] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,845 WARN [PacketResponder: BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34309] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,846 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291721640_22 at /127.0.0.1:35982 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35982 dst: /127.0.0.1:38809 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,846 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:42548 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34309:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42548 dst: /127.0.0.1:34309 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,846 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_291721640_22 at /127.0.0.1:42616 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34309:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42616 dst: /127.0.0.1:34309 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:35958 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35958 dst: /127.0.0.1:38809 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:42584 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34309:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42584 dst: /127.0.0.1:34309 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:42582 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34309:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42582 dst: /127.0.0.1:34309 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,848 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:35966 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35966 dst: /127.0.0.1:38809 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,848 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:35934 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35934 dst: /127.0.0.1:38809 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:11,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69bb1e39{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:11,850 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23bd1c1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:47:11,850 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:47:11,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c42ea91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:47:11,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32429ee4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,STOPPED} 2024-11-20T14:47:11,851 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:47:11,851 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:47:11,851 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:47:11,851 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1562597703-172.17.0.2-1732114016751 (Datanode Uuid 348bdec5-d7c6-496c-b942-4aee15d9f5c5) service to localhost/127.0.0.1:44451 2024-11-20T14:47:11,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data3/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:11,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data4/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:11,853 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:47:11,853 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@4a14e1a2 {}] datanode.DataXceiver(331): 127.0.0.1:38809:DataXceiver error processing unknown operation src: /127.0.0.1:39634 dst: /127.0.0.1:38809 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:11,853 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta block BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,853 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,853 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@e9d56b0 {}] datanode.DataXceiver(331): 127.0.0.1:38809:DataXceiver error processing unknown operation src: /127.0.0.1:39640 dst: /127.0.0.1:38809 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:11,854 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,854 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74bd1c10{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:11,855 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66f62573{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:47:11,855 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:47:11,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2be5a5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:47:11,856 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e068543{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,STOPPED} 2024-11-20T14:47:11,856 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:47:11,856 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1562597703-172.17.0.2-1732114016751 (Datanode Uuid fabd0784-db53-45e2-a49a-b995a1a822a6) service to localhost/127.0.0.1:44451 2024-11-20T14:47:11,856 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:47:11,857 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:47:11,857 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data1/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:11,857 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data2/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:11,857 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:47:11,861 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb., hostname=1a15ecfd95f4,45919,1732114018559, seqNum=2] 2024-11-20T14:47:11,863 ERROR [FSHLog-0-hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3-prefix:1a15ecfd95f4,45919,1732114018559 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,863 WARN [FSHLog-0-hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3-prefix:1a15ecfd95f4,45919,1732114018559 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:11,863 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C45919%2C1732114018559:(num 1732114019217) roll requested 2024-11-20T14:47:11,863 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 2024-11-20T14:47:11,866 WARN [Thread-896 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,866 WARN [Thread-896 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:11,866 WARN [Thread-896 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741838_1018 2024-11-20T14:47:11,869 WARN [Thread-896 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:11,875 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:11,875 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:11,876 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:11,876 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:11,876 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:11,876 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 2024-11-20T14:47:11,876 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,876 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:11,877 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-20T14:47:11,878 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-20T14:47:11,878 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 2024-11-20T14:47:11,879 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44575:44575),(127.0.0.1/127.0.0.1:41921:41921)] 2024-11-20T14:47:11,879 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 is not closed yet, will try archiving it next time 2024-11-20T14:47:11,881 WARN [IPC Server handler 0 on default port 44451 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-20T14:47:11,885 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 after 5ms 2024-11-20T14:47:12,156 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:12,691 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:13,879 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:13,881 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 2024-11-20T14:47:13,881 WARN [ResponseProcessor for block BP-1562597703-172.17.0.2-1732114016751:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1562597703-172.17.0.2-1732114016751:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:13,882 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:13,882 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:46392 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:44283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46392 dst: /127.0.0.1:44283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:13,883 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57466 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:42201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57466 dst: /127.0.0.1:42201 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:13,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@652b5a51{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:13,919 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1eb193b9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:47:13,919 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:47:13,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d5f200{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:47:13,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2afb6183{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,STOPPED} 2024-11-20T14:47:13,921 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:47:13,921 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:47:13,921 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:47:13,921 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1562597703-172.17.0.2-1732114016751 (Datanode Uuid c8f1b05e-caea-444f-98d9-524884a318d7) service to localhost/127.0.0.1:44451 2024-11-20T14:47:13,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data7/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:13,922 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data8/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:13,923 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:47:14,156 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:14,692 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:15,880 WARN [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]] 2024-11-20T14:47:15,880 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:15,880 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C45919%2C1732114018559:(num 1732114031863) roll requested 2024-11-20T14:47:15,881 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.1732114035881 2024-11-20T14:47:15,887 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 after 4008ms 2024-11-20T14:47:15,887 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:15,887 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57488 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data10]'}, localName='127.0.0.1:42201', datanodeUuid='c5d518b5-3d41-40e8-9692-c84db295b8c1', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741840_1022 to mirror 127.0.0.1:44283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:15,887 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:15,887 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741840_1022 2024-11-20T14:47:15,888 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57488 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T14:47:15,888 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57488 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57488 dst: /127.0.0.1:42201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
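The FSHLog warning at 14:47:15,880 above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is the low-replication check that drives the roll request: the writer compares the current pipeline size against the minimum tolerable replication and asks the log roller to close the WAL when it drops below that. A minimal sketch of that check, not FSHLog's actual code:

```java
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Not FSHLog's actual code: a minimal sketch of the check behind the
 * "Found 1 replicas but expecting no less than 2 replicas" message above.
 */
class LowReplicationRollCheck {
    private final int minTolerableReplication;
    private final AtomicBoolean rollRequested = new AtomicBoolean(false);

    LowReplicationRollCheck(int minTolerableReplication) {
        this.minTolerableReplication = minTolerableReplication;
    }

    /** @param currentReplication live datanodes remaining in the WAL's write pipeline */
    void checkLowReplication(int currentReplication) {
        if (currentReplication < minTolerableReplication
                && rollRequested.compareAndSet(false, true)) {
            System.out.printf("Found %d replicas but expecting no less than %d replicas."
                + " Requesting close of WAL.%n", currentReplication, minTolerableReplication);
            requestLogRoll();
        }
    }

    private void requestLogRoll() {
        // In the real WAL this wakes the log roller thread; here it is a placeholder.
    }

    public static void main(String[] args) {
        LowReplicationRollCheck check = new LowReplicationRollCheck(2);
        check.checkLowReplication(1); // triggers a roll request, as in the log above
    }
}
```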
2024-11-20T14:47:15,888 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:15,891 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38809 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:15,891 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57494 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data10]'}, localName='127.0.0.1:42201', datanodeUuid='c5d518b5-3d41-40e8-9692-c84db295b8c1', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741841_1023 to mirror 127.0.0.1:38809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:15,891 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 
2024-11-20T14:47:15,891 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741841_1023 2024-11-20T14:47:15,891 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57494 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T14:47:15,891 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57494 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:42201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57494 dst: /127.0.0.1:42201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:15,892 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:15,895 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34309 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:15,894 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37782 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741842_1024 to mirror 127.0.0.1:34309 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:15,895 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:15,895 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37782 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T14:47:15,895 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741842_1024 2024-11-20T14:47:15,895 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37782 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37782 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:15,896 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:15,900 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:15,900 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:15,900 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:15,901 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:15,901 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:15,901 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114035881 2024-11-20T14:47:15,903 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41921:41921),(127.0.0.1/127.0.0.1:42103:42103)] 2024-11-20T14:47:15,903 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 is not closed yet, will try archiving it next time 2024-11-20T14:47:15,903 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 is not closed yet, will try archiving it next time 2024-11-20T14:47:15,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741839_1021 (size=2431) 2024-11-20T14:47:15,928 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T14:47:16,157 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:16,304 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 is not closed yet, will try archiving it next time 2024-11-20T14:47:16,692 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:17,903 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:17,932 WARN [ResponseProcessor for block BP-1562597703-172.17.0.2-1732114016751:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1562597703-172.17.0.2-1732114016751:blk_1073741843_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:17,932 WARN [DataStreamer for file /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114035881 block BP-1562597703-172.17.0.2-1732114016751:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 
2024-11-20T14:47:17,933 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:57498 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:42201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57498 dst: /127.0.0.1:42201 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:17,933 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37796 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37796 dst: /127.0.0.1:46197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:17,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@89d91f0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:17,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27fa50f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:47:17,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:47:17,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10fdb31b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:47:17,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c4b247{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,STOPPED} 2024-11-20T14:47:17,952 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:47:17,952 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:47:17,952 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:47:17,952 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1562597703-172.17.0.2-1732114016751 (Datanode Uuid c5d518b5-3d41-40e8-9692-c84db295b8c1) service to localhost/127.0.0.1:44451 2024-11-20T14:47:17,953 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data9/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:17,953 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data10/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:17,954 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:47:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45919 {}] regionserver.HRegion(8855): Flush requested on 9067098e18e52091553316b207831dfb 2024-11-20T14:47:17,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9067098e18e52091553316b207831dfb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:47:17,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/3072ce086216446e8598999538029b58 is 1080, key is row0002/info:/1732114033924/Put/seqid=0 2024-11-20T14:47:17,986 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:17,986 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:17,986 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741844_1027 2024-11-20T14:47:17,987 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:17,988 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:17,988 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK], DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:17,989 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741845_1028 2024-11-20T14:47:17,989 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:17,991 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:17,991 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:17,991 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741846_1029 2024-11-20T14:47:17,992 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:17,994 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34309 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:17,994 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37812 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741847_1030 to mirror 127.0.0.1:34309 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:17,994 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:17,994 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741847_1030 2024-11-20T14:47:17,994 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37812 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:17,994 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37812 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37812 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:17,995 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:17,996 WARN [IPC Server handler 3 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:17,996 WARN [IPC Server handler 3 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:17,996 WARN [IPC Server handler 3 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:17,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741848_1031 (size=10347) 2024-11-20T14:47:18,157 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
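The repeated "All datanodes [...] are bad. Aborting..." traces are DataStreamer giving up on pipeline recovery once every datanode that held the block has been excluded. How aggressively the client replaces failed datanodes during recovery is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the snippet below shows them being set on a client Configuration, with illustrative values rather than whatever this test uses:

```java
import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettingsSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow the client to look for replacement datanodes during pipeline recovery.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // With best-effort enabled the writer keeps going on the surviving datanodes
        // instead of aborting when no replacement can be found.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
    }
}
```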
2024-11-20T14:47:18,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/3072ce086216446e8598999538029b58 2024-11-20T14:47:18,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/3072ce086216446e8598999538029b58 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/3072ce086216446e8598999538029b58 2024-11-20T14:47:18,410 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@406680c3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741848_1031 to 127.0.0.1:42201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
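The flush above writes the new HFile under the region's .tmp directory and then commits it by moving it into the info family directory (the HRegionFileSystem "Committing ... as ..." step). A minimal sketch of that move with hypothetical paths, assuming a plain FileSystem.rename stands in for the commit:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedHFileSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical paths: the flusher writes the new HFile under the region's
        // .tmp directory, then the commit step moves it into the family directory.
        Path tmpFile = new Path("/hbase/data/default/ExampleTable/exampleRegion/.tmp/info/hfile-0001");
        Path committed = new Path("/hbase/data/default/ExampleTable/exampleRegion/info/hfile-0001");
        if (!fs.rename(tmpFile, committed)) { // the rename is the commit point
            throw new java.io.IOException("Failed to commit " + tmpFile + " as " + committed);
        }
        System.out.println("Committed " + committed);
    }
}
```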
2024-11-20T14:47:18,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/3072ce086216446e8598999538029b58, entries=5, sequenceid=11, filesize=10.1 K 2024-11-20T14:47:18,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 9067098e18e52091553316b207831dfb in 451ms, sequenceid=11, compaction requested=false 2024-11-20T14:47:18,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9067098e18e52091553316b207831dfb: 2024-11-20T14:47:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45919 {}] regionserver.HRegion(8855): Flush requested on 9067098e18e52091553316b207831dfb 2024-11-20T14:47:18,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9067098e18e52091553316b207831dfb 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-20T14:47:18,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/d4224f66e1d04be1987bc55260c421bc is 1080, key is row0007/info:/1732114037965/Put/seqid=0 2024-11-20T14:47:18,599 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:18,600 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:18,600 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741849_1032 2024-11-20T14:47:18,600 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:18,602 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:18,602 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:18,602 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741850_1033 2024-11-20T14:47:18,603 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:18,604 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:18,605 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:18,605 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741851_1034 2024-11-20T14:47:18,605 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:18,607 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:18,607 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:18,607 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741852_1035 2024-11-20T14:47:18,608 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:18,608 WARN [IPC Server handler 1 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:18,608 WARN [IPC Server handler 1 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:18,608 WARN [IPC Server handler 1 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:18,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741853_1036 (size=12506) 2024-11-20T14:47:18,693 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:19,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/d4224f66e1d04be1987bc55260c421bc 2024-11-20T14:47:19,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/d4224f66e1d04be1987bc55260c421bc as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc 2024-11-20T14:47:19,033 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc, entries=7, sequenceid=24, filesize=12.2 K 2024-11-20T14:47:19,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 9067098e18e52091553316b207831dfb in 445ms, sequenceid=24, compaction requested=false 2024-11-20T14:47:19,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9067098e18e52091553316b207831dfb: 2024-11-20T14:47:19,036 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-20T14:47:19,036 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:19,036 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc because midkey is the same as first or last row 2024-11-20T14:47:19,903 WARN [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]] 2024-11-20T14:47:19,903 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:19,904 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C45919%2C1732114018559:(num 1732114035881) roll requested 2024-11-20T14:47:19,904 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.1732114039904 2024-11-20T14:47:19,907 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:19,907 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:19,907 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741854_1037 2024-11-20T14:47:19,907 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:19,909 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
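The DataStreamer warnings above repeat one recovery pattern: the connect to the first datanode of a new pipeline is refused, the half-created block is abandoned, the refusing node is excluded, and a fresh pipeline is attempted until no candidates remain ("All datanodes ... are bad. Aborting..."). A self-contained sketch of that abandon-and-exclude loop in plain Java; the class and method names are hypothetical and this is not the HDFS client implementation.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative sketch only: exclude nodes whose connections are refused and retry with the rest.
public class PipelineRetrySketch {
  public static InetSocketAddress establishPipeline(List<InetSocketAddress> nodes, int timeoutMs)
      throws IOException {
    Set<InetSocketAddress> excluded = new HashSet<>();
    while (excluded.size() < nodes.size()) {
      InetSocketAddress candidate = pickTarget(nodes, excluded);
      try (Socket s = new Socket()) {
        s.connect(candidate, timeoutMs); // analogous to createBlockOutputStream opening the pipeline
        return candidate;                // pipeline established against this node
      } catch (IOException e) {
        excluded.add(candidate);         // "Abandoning blk_..." / "Excluding datanode ..."
      }
    }
    throw new IOException("All candidate datanodes are bad. Aborting...");
  }

  private static InetSocketAddress pickTarget(List<InetSocketAddress> nodes,
      Set<InetSocketAddress> excluded) {
    for (InetSocketAddress node : nodes) {
      if (!excluded.contains(node)) {
        return node;
      }
    }
    throw new IllegalStateException("no non-excluded datanode available");
  }
}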
2024-11-20T14:47:19,909 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37852 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741855_1038 to mirror 127.0.0.1:42201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:19,910 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:19,910 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741855_1038 2024-11-20T14:47:19,910 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37852 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T14:47:19,910 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37852 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37852 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:19,910 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:19,912 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:19,912 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:19,912 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741856_1039 2024-11-20T14:47:19,912 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:19,914 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:19,914 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:19,914 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741857_1040 2024-11-20T14:47:19,915 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:19,915 WARN [IPC Server handler 0 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:19,916 WARN [IPC Server handler 0 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:19,916 WARN [IPC Server handler 0 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:19,918 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:19,918 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:19,918 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:19,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:19,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:19,919 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114035881 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114039904 2024-11-20T14:47:19,920 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42103:42103)] 2024-11-20T14:47:19,920 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 is not closed yet, will try archiving it next time 2024-11-20T14:47:19,920 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114035881 is not closed yet, will try archiving it next time 2024-11-20T14:47:19,920 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs/1a15ecfd95f4%2C45919%2C1732114018559.1732114031863 2024-11-20T14:47:19,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741843_1026 (size=25992) 2024-11-20T14:47:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45919 {}] regionserver.HRegion(8855): Flush requested on 9067098e18e52091553316b207831dfb 2024-11-20T14:47:20,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9067098e18e52091553316b207831dfb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T14:47:20,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/5359ff4aa0734d3fb88f7d2dac308e84 is 1079, key is tmprow/info:/1732114040010/Put/seqid=0 2024-11-20T14:47:20,019 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:20,019 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 
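The log-roller records above show the trigger for this roll: a pipeline error is detected with fewer live replicas than required ("Found 1 replicas but expecting no less than 2"), a close of the current WAL is requested, a new writer named prefix.timestamp (here ...1732114039904) is created, and the old files are archived. A minimal sketch of that threshold check and naming pattern; the class is hypothetical and only illustrates the decision reported in the log.

// Illustrative sketch only: roll the WAL when the current pipeline has too few replicas.
public class WalRollCheckSketch {
  private final int minReplicas;

  public WalRollCheckSketch(int minReplicas) {
    this.minReplicas = minReplicas;
  }

  /** True when the WAL should be closed and a new one opened. */
  public boolean shouldRequestRoll(int currentPipelineReplicas) {
    return currentPipelineReplicas < minReplicas;
  }

  /** New WAL name in the same general pattern as the files above: prefix + "." + epoch millis. */
  public String newWalName(String prefix) {
    return prefix + "." + System.currentTimeMillis();
  }

  public static void main(String[] args) {
    WalRollCheckSketch check = new WalRollCheckSketch(2);
    // Mirrors "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL."
    System.out.println(check.shouldRequestRoll(1)); // true
  }
}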
2024-11-20T14:47:20,019 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741859_1042 2024-11-20T14:47:20,020 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:20,021 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:20,021 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:20,021 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741860_1043 2024-11-20T14:47:20,022 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:20,023 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:20,024 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:20,024 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741861_1044 2024-11-20T14:47:20,024 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:20,027 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38809 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:20,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37878 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741862_1045 to mirror 127.0.0.1:38809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:20,027 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:20,027 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37878 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:20,027 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741862_1045 2024-11-20T14:47:20,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37878 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37878 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:20,028 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:20,029 WARN [IPC Server handler 2 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:20,029 WARN [IPC Server handler 2 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:20,029 WARN [IPC Server handler 2 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:20,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741863_1046 (size=6027) 2024-11-20T14:47:20,157 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
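The IPC-server warnings above ("Failed to place enough replicas, still in need of 1 to reach 2") come from the namenode running out of usable targets once the unreachable datanodes are excluded. A simplified sketch of that shortfall check; the class and method are hypothetical and do not reflect the real BlockPlacementPolicyDefault logic beyond the counting shown in the log.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

// Illustrative sketch only: choose up to `replication` targets, skipping excluded nodes,
// and warn when fewer targets remain than the policy requires.
public class PlacementSketch {
  public static List<String> chooseTargets(List<String> liveNodes, Set<String> excluded, int replication) {
    List<String> chosen = new ArrayList<>();
    for (String node : liveNodes) {
      if (chosen.size() == replication) {
        break;
      }
      if (!excluded.contains(node)) {
        chosen.add(node);
      }
    }
    if (chosen.size() < replication) {
      System.err.printf("Failed to place enough replicas, still in need of %d to reach %d%n",
          replication - chosen.size(), replication);
    }
    return chosen;
  }
}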
2024-11-20T14:47:20,325 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 is not closed yet, will try archiving it next time 2024-11-20T14:47:20,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/5359ff4aa0734d3fb88f7d2dac308e84 2024-11-20T14:47:20,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/5359ff4aa0734d3fb88f7d2dac308e84 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/5359ff4aa0734d3fb88f7d2dac308e84 2024-11-20T14:47:20,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/5359ff4aa0734d3fb88f7d2dac308e84, entries=1, sequenceid=34, filesize=5.9 K 2024-11-20T14:47:20,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 9067098e18e52091553316b207831dfb in 443ms, sequenceid=34, compaction requested=true 2024-11-20T14:47:20,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9067098e18e52091553316b207831dfb: 2024-11-20T14:47:20,455 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-20T14:47:20,455 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:20,455 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc because midkey is the same as first or last row 2024-11-20T14:47:20,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9067098e18e52091553316b207831dfb:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:47:20,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:47:20,456 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:47:20,457 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:47:20,457 DEBUG 
[RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HStore(1541): 9067098e18e52091553316b207831dfb/info is initiating minor compaction (all files) 2024-11-20T14:47:20,457 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9067098e18e52091553316b207831dfb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:20,458 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/3072ce086216446e8598999538029b58, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/5359ff4aa0734d3fb88f7d2dac308e84] into tmpdir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp, totalSize=28.2 K 2024-11-20T14:47:20,458 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3072ce086216446e8598999538029b58, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732114033924 2024-11-20T14:47:20,458 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.Compactor(225): Compacting d4224f66e1d04be1987bc55260c421bc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732114037965 2024-11-20T14:47:20,459 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5359ff4aa0734d3fb88f7d2dac308e84, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732114040010 2024-11-20T14:47:20,471 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9067098e18e52091553316b207831dfb#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:47:20,471 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/129d0f748692489a94f6189509ae44c6 is 1080, key is row0002/info:/1732114033924/Put/seqid=0 2024-11-20T14:47:20,473 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:20,473 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:20,473 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741864_1047 2024-11-20T14:47:20,474 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:20,475 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:20,475 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 
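The compaction records a few entries above report that all three eligible store files (about 10.1 K, 12.2 K and 5.9 K) were selected, for a totalSize of 28880 bytes (28.2 K). A small sketch of that size summation; the sizes are taken from the records, with the first inferred so the total matches, and the class is purely illustrative.

import java.util.List;

// Illustrative sketch only: sum the sizes of the store files chosen for a minor compaction.
public class CompactionSelectionSketch {
  public static long selectedTotalSize(List<Long> eligibleFileSizes, int maxFilesToCompact) {
    int n = Math.min(eligibleFileSizes.size(), maxFilesToCompact);
    long total = 0;
    for (int i = 0; i < n; i++) {
      total += eligibleFileSizes.get(i);
    }
    return total;
  }

  public static void main(String[] args) {
    // Byte sizes consistent with the three HFiles named above (the first is inferred from the total).
    System.out.println(selectedTotalSize(List.of(10_347L, 12_506L, 6_027L), 10)); // 28880
  }
}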
2024-11-20T14:47:20,475 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741865_1048 2024-11-20T14:47:20,476 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:20,478 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37932 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741866_1049 to mirror 127.0.0.1:44283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:20,478 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:20,478 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37932 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-20T14:47:20,478 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:20,478 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741866_1049 2024-11-20T14:47:20,478 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37932 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37932 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:20,479 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:20,481 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38809 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:20,481 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37942 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741867_1050 to mirror 127.0.0.1:38809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:20,482 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:20,482 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741867_1050 2024-11-20T14:47:20,482 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37942 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:20,482 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37942 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37942 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:20,482 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:20,483 WARN [IPC Server handler 0 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:20,483 WARN [IPC Server handler 0 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:20,483 WARN [IPC Server handler 0 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:20,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741868_1051 (size=17994) 2024-11-20T14:47:20,693 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
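The repeated "All datanodes [...] are bad. Aborting..." failures above come from the DFS client's pipeline-recovery path: the file is being written with replication 2, and once every other datanode in the mini cluster refuses connections there is no node left to substitute into the pipeline. Whether the client even tries to find a replacement is controlled by the replace-datanode-on-failure client settings. A minimal, hedged sketch of how a test harness might configure them (the keys are standard HDFS client keys; the surrounding cluster setup is assumed and is not shown in this log):

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConfigSketch {
        public static Configuration relaxedPipelineRecovery() {
            Configuration conf = new Configuration();
            // Keep writing even when no replacement datanode can be found,
            // instead of aborting with "All datanodes ... are bad".
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            // The writes in this log use a replication factor of 2.
            conf.setInt("dfs.replication", 2);
            return conf;
        }
    }

With best-effort enabled the stream continues on whatever pipeline it still has rather than throwing, which is one common way flaky-test suites tolerate deliberately killed datanodes.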
2024-11-20T14:47:20,894 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/129d0f748692489a94f6189509ae44c6 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 2024-11-20T14:47:20,901 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9067098e18e52091553316b207831dfb/info of 9067098e18e52091553316b207831dfb into 129d0f748692489a94f6189509ae44c6(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:47:20,901 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9067098e18e52091553316b207831dfb: 2024-11-20T14:47:20,901 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb., storeName=9067098e18e52091553316b207831dfb/info, priority=13, startTime=1732114040455; duration=0sec 2024-11-20T14:47:20,901 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T14:47:20,901 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:20,901 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 because midkey is the same as first or last row 2024-11-20T14:47:20,901 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T14:47:20,901 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:20,902 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 because midkey is the same as first or last row 2024-11-20T14:47:20,902 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-20T14:47:20,902 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:20,902 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 because midkey is the same as first or last row 2024-11-20T14:47:20,902 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:47:20,902 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9067098e18e52091553316b207831dfb:info 2024-11-20T14:47:21,398 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@406680c3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741853_1036 to 127.0.0.1:42201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:21,399 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@60240c46[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741843_1026 to 127.0.0.1:38809 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
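The split checks logged just above run after every compaction and flush: ConstantSizeRegionSplitPolicy reports that the 17.6 K store exceeds a 16.0 K threshold, IncreasingToUpperBoundRegionSplitPolicy notes there is only one region for the table, and StoreUtils then refuses to split because the candidate midkey equals the file's first or last row, i.e. there is no usable split point. A 16 K threshold only appears when a test lowers the usual region-size limits; which knob this particular test turns is not visible in this excerpt, but a hedged sketch of the configuration keys involved looks like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitThresholdSketch {
        public static Configuration tinySplitThreshold() {
            Configuration conf = HBaseConfiguration.create();
            // Split policy consulted by the compaction/flush threads above.
            conf.set("hbase.regionserver.region.split.policy",
                "org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy");
            // Upper bound on region size; lowering it is one way to end up with
            // the "sizeToCheck=16.0 K" comparison seen in the log.
            conf.setLong("hbase.hregion.max.filesize", 16 * 1024L);
            return conf;
        }
    }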
2024-11-20T14:47:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45919 {}] regionserver.HRegion(8855): Flush requested on 9067098e18e52091553316b207831dfb 2024-11-20T14:47:21,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9067098e18e52091553316b207831dfb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T14:47:21,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/1b7c680e2afe4bab960f9e3b6be2f0c5 is 1079, key is tmprow/info:/1732114041433/Put/seqid=0 2024-11-20T14:47:21,443 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:21,443 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:21,443 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741869_1052 2024-11-20T14:47:21,444 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:21,446 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
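The flush above snapshots only ~7.35 KB of memstore before writing a .tmp hfile, far below the 128 MB default flush threshold, so the test is evidently either forcing flushes or running with a much smaller hbase.hregion.memstore.flush.size; this excerpt does not show which. A hedged sketch of both common approaches (the connection setup is hypothetical, the table name is taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Option 1: shrink the per-region memstore flush threshold so a handful
            // of small puts is enough to trigger flushes like the one above.
            conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024L);

            // Option 2: force a flush explicitly through the Admin API.
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }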
2024-11-20T14:47:21,446 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:21,446 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741870_1053 2024-11-20T14:47:21,447 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:21,450 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:21,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37956 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741871_1054 to mirror 127.0.0.1:42201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:21,450 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:21,450 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741871_1054 2024-11-20T14:47:21,450 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37956 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:21,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37956 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37956 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:21,451 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:21,454 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:21,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37962 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741872_1055 to mirror 127.0.0.1:44283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:21,455 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:21,455 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741872_1055 2024-11-20T14:47:21,455 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37962 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:21,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:37962 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37962 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:21,457 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:21,458 WARN [IPC Server handler 2 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:21,458 WARN [IPC Server handler 2 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:21,458 WARN [IPC Server handler 2 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:21,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741873_1056 (size=6027) 2024-11-20T14:47:21,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/1b7c680e2afe4bab960f9e3b6be2f0c5 2024-11-20T14:47:21,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/1b7c680e2afe4bab960f9e3b6be2f0c5 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/1b7c680e2afe4bab960f9e3b6be2f0c5 2024-11-20T14:47:21,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/1b7c680e2afe4bab960f9e3b6be2f0c5, entries=1, sequenceid=45, filesize=5.9 K 2024-11-20T14:47:21,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 9067098e18e52091553316b207831dfb in 447ms, sequenceid=45, compaction requested=false 2024-11-20T14:47:21,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9067098e18e52091553316b207831dfb: 2024-11-20T14:47:21,882 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-20T14:47:21,882 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:21,882 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 because midkey is the same as first or last row 2024-11-20T14:47:21,920 WARN [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]] 2024-11-20T14:47:21,920 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:21,921 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C45919%2C1732114018559:(num 1732114039904) roll requested 2024-11-20T14:47:21,921 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.1732114041921 2024-11-20T14:47:21,926 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38809 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:21,926 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54640 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741874_1057 to mirror 127.0.0.1:38809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:21,926 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:21,926 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741874_1057 2024-11-20T14:47:21,926 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54640 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T14:47:21,926 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54640 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54640 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:21,927 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:21,928 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:21,928 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:21,928 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741875_1058 2024-11-20T14:47:21,929 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:21,931 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:21,931 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:21,931 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741876_1059 2024-11-20T14:47:21,932 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:21,933 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:21,933 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 
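The roll in progress here was requested by FSHLog after it saw "Found 1 replicas but expecting no less than 2 replicas" on the current WAL pipeline. That check compares the live pipeline against a tolerable minimum replication, and the roller only retries a bounded number of consecutive low-replication rolls before it stops asking; both limits are configurable. A hedged sketch of the keys as I understand them (names and defaults are recalled from FSHLog, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalLowReplicationSketch {
        public static Configuration tuneWalRolling() {
            Configuration conf = HBaseConfiguration.create();
            // Pipeline replica count below which FSHLog requests a roll
            // (defaults to the filesystem's default replication).
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Consecutive low-replication rolls attempted before giving up.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            return conf;
        }
    }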
2024-11-20T14:47:21,933 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741877_1060 2024-11-20T14:47:21,934 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:21,935 WARN [IPC Server handler 3 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:21,935 WARN [IPC Server handler 3 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:21,935 WARN [IPC Server handler 3 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:21,940 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:21,940 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:21,940 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:21,940 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:21,941 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:21,941 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114039904 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114041921 2024-11-20T14:47:21,941 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42103:42103)] 2024-11-20T14:47:21,941 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 is not closed yet, will try archiving it next time 2024-11-20T14:47:21,941 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114039904 is not closed yet, will try archiving it next time 2024-11-20T14:47:21,942 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114035881 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs/1a15ecfd95f4%2C45919%2C1732114018559.1732114035881 2024-11-20T14:47:21,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741858_1041 (size=13591) 2024-11-20T14:47:22,158 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:22,344 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 is not closed yet, will try archiving it next time 2024-11-20T14:47:22,398 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@60240c46[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741863_1046 to 127.0.0.1:34309 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:22,398 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@406680c3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741868_1051 to 127.0.0.1:34309 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:22,693 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:22,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45919 {}] regionserver.HRegion(8855): Flush requested on 9067098e18e52091553316b207831dfb 2024-11-20T14:47:22,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9067098e18e52091553316b207831dfb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T14:47:22,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/670ba78458f247cda482ac7fcb1454cd is 1079, key is tmprow/info:/1732114042857/Put/seqid=0 2024-11-20T14:47:22,872 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
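By this point the namenode has repeated its hint several times: "For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology." Since this suite already runs on Log4j 2, one way to follow that advice from test code rather than by editing the properties file is the Configurator API; a minimal sketch:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class PlacementDebugLogging {
        public static void enable() {
            // Raise exactly the two loggers the namenode warning recommends.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }

With those loggers at DEBUG the namenode explains which datanodes were rejected for placement and why, which is usually enough to confirm that the remaining nodes were simply excluded or dead.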
2024-11-20T14:47:22,872 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:22,872 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741879_1062 2024-11-20T14:47:22,873 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:22,874 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:22,875 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:22,875 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741880_1063 2024-11-20T14:47:22,875 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:22,878 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34309 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:22,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54660 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741881_1064 to mirror 127.0.0.1:34309 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:22,878 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:22,878 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741881_1064 2024-11-20T14:47:22,878 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54660 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:22,878 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54660 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54660 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:22,879 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:22,880 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:22,881 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 
2024-11-20T14:47:22,881 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741882_1065 2024-11-20T14:47:22,881 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:22,882 WARN [IPC Server handler 1 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:22,882 WARN [IPC Server handler 1 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:22,882 WARN [IPC Server handler 1 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:22,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741883_1066 (size=6027) 2024-11-20T14:47:23,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/670ba78458f247cda482ac7fcb1454cd 2024-11-20T14:47:23,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/670ba78458f247cda482ac7fcb1454cd as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/670ba78458f247cda482ac7fcb1454cd 2024-11-20T14:47:23,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/670ba78458f247cda482ac7fcb1454cd, entries=1, sequenceid=55, filesize=5.9 K 2024-11-20T14:47:23,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 9067098e18e52091553316b207831dfb in 445ms, sequenceid=55, compaction requested=true 2024-11-20T14:47:23,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
9067098e18e52091553316b207831dfb: 2024-11-20T14:47:23,304 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-20T14:47:23,304 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:23,304 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 because midkey is the same as first or last row 2024-11-20T14:47:23,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9067098e18e52091553316b207831dfb:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:47:23,305 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:47:23,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:47:23,306 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:47:23,306 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HStore(1541): 9067098e18e52091553316b207831dfb/info is initiating minor compaction (all files) 2024-11-20T14:47:23,306 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9067098e18e52091553316b207831dfb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 
2024-11-20T14:47:23,306 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/1b7c680e2afe4bab960f9e3b6be2f0c5, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/670ba78458f247cda482ac7fcb1454cd] into tmpdir=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp, totalSize=29.3 K 2024-11-20T14:47:23,307 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.Compactor(225): Compacting 129d0f748692489a94f6189509ae44c6, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732114033924 2024-11-20T14:47:23,307 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b7c680e2afe4bab960f9e3b6be2f0c5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732114041433 2024-11-20T14:47:23,308 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] compactions.Compactor(225): Compacting 670ba78458f247cda482ac7fcb1454cd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732114042857 2024-11-20T14:47:23,329 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9067098e18e52091553316b207831dfb#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:47:23,330 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/fdf105e969734142a26a9dc05f54c1d7 is 1080, key is row0002/info:/1732114033924/Put/seqid=0 2024-11-20T14:47:23,331 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:23,332 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]) is bad. 2024-11-20T14:47:23,332 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741884_1067 2024-11-20T14:47:23,332 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34309,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK] 2024-11-20T14:47:23,335 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38809 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:23,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54674 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741885_1068 to mirror 127.0.0.1:38809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:23,335 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:23,335 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54674 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:23,335 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741885_1068 2024-11-20T14:47:23,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54674 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54674 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:23,336 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:23,338 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:23,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54676 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741886_1069 to mirror 127.0.0.1:42201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:23,339 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:23,339 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54676 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:23,339 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741886_1069 2024-11-20T14:47:23,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54676 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54676 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:23,339 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:23,341 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:23,341 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54682 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741887_1070 to mirror 127.0.0.1:44283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:23,342 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:23,342 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54682 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:23,342 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741887_1070 2024-11-20T14:47:23,342 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:54682 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54682 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:23,342 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:23,343 WARN [IPC Server handler 3 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-20T14:47:23,343 WARN [IPC Server handler 3 on default port 44451 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-20T14:47:23,343 WARN [IPC Server handler 3 on default port 44451 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-20T14:47:23,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741888_1071 (size=18097) 2024-11-20T14:47:23,355 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/fdf105e969734142a26a9dc05f54c1d7 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/fdf105e969734142a26a9dc05f54c1d7 2024-11-20T14:47:23,364 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9067098e18e52091553316b207831dfb/info of 9067098e18e52091553316b207831dfb into fdf105e969734142a26a9dc05f54c1d7(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9067098e18e52091553316b207831dfb: 2024-11-20T14:47:23,364 INFO [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb., storeName=9067098e18e52091553316b207831dfb/info, priority=13, startTime=1732114043305; duration=0sec 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/fdf105e969734142a26a9dc05f54c1d7 because midkey is the same as first or last row 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/fdf105e969734142a26a9dc05f54c1d7 because midkey is the same as first or last row 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/fdf105e969734142a26a9dc05f54c1d7 because midkey is the same as first or last row 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:47:23,364 DEBUG [RS:0;1a15ecfd95f4:45919-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9067098e18e52091553316b207831dfb:info 2024-11-20T14:47:23,942 WARN [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-20T14:47:23,942 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:24,086 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:24,092 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:47:24,095 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:47:24,095 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:47:24,095 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:47:24,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22bae5a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:47:24,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ddd152f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:47:24,158 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:24,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3aa0d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/java.io.tmpdir/jetty-localhost-45351-hadoop-hdfs-3_4_1-tests_jar-_-any-3589963623686053772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:24,190 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f0c7751{HTTP/1.1, (http/1.1)}{localhost:45351} 2024-11-20T14:47:24,190 INFO [Time-limited test {}] server.Server(415): Started @129390ms 2024-11-20T14:47:24,192 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:47:24,400 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@60240c46[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741858_1041 to 127.0.0.1:34309 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:24,401 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@406680c3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741873_1056 to 127.0.0.1:42201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:24,578 WARN [Thread-986 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:47:24,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3589362aeb412359 with lease ID 0x19b3ebfb16add791: from storage DS-d454c320-7338-4438-87b7-611a2cf8c846 node DatanodeRegistration(127.0.0.1:43357, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=40149, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T14:47:24,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3589362aeb412359 with lease ID 0x19b3ebfb16add791: from storage DS-d7ea8a13-d8d9-498f-a5d0-a95cdb64bd31 node DatanodeRegistration(127.0.0.1:43357, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=40149, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:24,694 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:25,401 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@406680c3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741883_1066 to 127.0.0.1:44283 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:25,401 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@60240c46[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741888_1071 to 127.0.0.1:42201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:25,942 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:26,158 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:26,694 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:27,943 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,159 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,389 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T14:47:28,695 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,963 ERROR [FSHLog-0-hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData-prefix:1a15ecfd95f4,33939,1732114018411 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,963 WARN [FSHLog-0-hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData-prefix:1a15ecfd95f4,33939,1732114018411 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,964 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C33939%2C1732114018411:(num 1732114018717) roll requested 2024-11-20T14:47:28,964 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C33939%2C1732114018411.1732114048964 2024-11-20T14:47:28,968 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44283 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:28,968 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:54998 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741889_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data4]'}, localName='127.0.0.1:43357', datanodeUuid='348bdec5-d7c6-496c-b942-4aee15d9f5c5', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741889_1072 to mirror 127.0.0.1:44283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:28,968 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43357,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK], DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK]) is bad. 2024-11-20T14:47:28,969 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:54998 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741889_1072] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-20T14:47:28,969 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741889_1072 2024-11-20T14:47:28,969 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:54998 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741889_1072] {}] datanode.DataXceiver(331): 127.0.0.1:43357:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54998 dst: /127.0.0.1:43357 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:28,969 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44283,DS-c3738f09-b982-4b4c-8d50-5d732a4c3000,DISK] 2024-11-20T14:47:28,971 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,971 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]) is bad. 2024-11-20T14:47:28,971 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741890_1073 2024-11-20T14:47:28,972 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK] 2024-11-20T14:47:28,973 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,973 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741891_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:28,973 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741891_1074 2024-11-20T14:47:28,974 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:28,977 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:28,977 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:28,978 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:28,978 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:28,978 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:28,978 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114048964 2024-11-20T14:47:28,978 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:28,978 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:28,979 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 2024-11-20T14:47:28,979 WARN [IPC Server handler 4 on default port 44451 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741830_1006 2024-11-20T14:47:28,979 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 after 0ms 2024-11-20T14:47:28,980 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40149:40149),(127.0.0.1/127.0.0.1:42103:42103)] 2024-11-20T14:47:28,980 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 is not closed yet, will try archiving it next time 2024-11-20T14:47:29,943 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:30,159 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:31,943 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:32,160 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:32,981 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 after 4002ms 2024-11-20T14:47:33,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:47:33,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741835_1011 (size=393) 2024-11-20T14:47:33,944 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:34,160 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:34,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:47:34,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:47:34,597 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@e3bfd34 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:38809,null,null]) java.net.ConnectException: Call From 1a15ecfd95f4/172.17.0.2 to localhost:39089 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T14:47:34,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741833_1020 (size=455) 2024-11-20T14:47:34,910 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs/1a15ecfd95f4%2C45919%2C1732114018559.1732114019217 2024-11-20T14:47:34,912 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114039904 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs/1a15ecfd95f4%2C45919%2C1732114018559.1732114039904 2024-11-20T14:47:35,944 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:36,160 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:36,584 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6e14027b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43357, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=40149, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741833_1020 to 127.0.0.1:42201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:36,584 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f178af0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43357, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=40149, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741826_1002 to 127.0.0.1:44283 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:37,849 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.1732114057849 2024-11-20T14:47:37,860 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:37,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:37,861 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:37,861 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:37,861 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:37,862 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114041921 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114057849 2024-11-20T14:47:37,863 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40149:40149),(127.0.0.1/127.0.0.1:42103:42103)] 2024-11-20T14:47:37,864 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114041921 is not closed yet, will try archiving it next time 2024-11-20T14:47:37,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741878_1061 (size=12911) 2024-11-20T14:47:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45919 {}] regionserver.HRegion(8855): Flush requested on 9067098e18e52091553316b207831dfb 2024-11-20T14:47:37,870 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9067098e18e52091553316b207831dfb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-20T14:47:37,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/6f60833a6330419ba0f49a2da3569f24 is 1080, key is row0013/info:/1732114057866/Put/seqid=0 2024-11-20T14:47:37,881 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:37,881 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:45970 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741894_1078 to mirror 127.0.0.1:42201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:37,881 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:37,881 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:45970 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:37,881 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741894_1078 2024-11-20T14:47:37,881 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-482735927_22 at /127.0.0.1:45970 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45970 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:37,882 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:37,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741895_1079 (size=8190) 2024-11-20T14:47:37,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741895_1079 (size=8190) 2024-11-20T14:47:37,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/6f60833a6330419ba0f49a2da3569f24 2024-11-20T14:47:37,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/6f60833a6330419ba0f49a2da3569f24 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/6f60833a6330419ba0f49a2da3569f24 2024-11-20T14:47:37,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/6f60833a6330419ba0f49a2da3569f24, entries=3, sequenceid=66, filesize=8.0 K 2024-11-20T14:47:37,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 9067098e18e52091553316b207831dfb in 36ms, sequenceid=66, compaction requested=false 2024-11-20T14:47:37,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9067098e18e52091553316b207831dfb: 2024-11-20T14:47:37,906 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-20T14:47:37,906 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:47:37,906 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/fdf105e969734142a26a9dc05f54c1d7 because midkey is the same as first or last row 2024-11-20T14:47:37,945 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(556): 
LowReplication-Roller was enabled. 2024-11-20T14:47:37,945 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T14:47:38,096 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:47:38,096 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:47:38,096 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:38,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:38,097 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T14:47:38,097 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T14:47:38,097 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=918571997, stopped=false 2024-11-20T14:47:38,098 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a15ecfd95f4,33939,1732114018411 2024-11-20T14:47:38,161 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:38,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:47:38,181 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:47:38,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:47:38,181 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:38,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:38,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:38,181 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:47:38,182 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:47:38,183 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:47:38,183 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:47:38,183 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:38,183 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:47:38,184 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:47:38,184 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,45919,1732114018559' ***** 2024-11-20T14:47:38,184 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:47:38,184 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,34813,1732114020050' ***** 2024-11-20T14:47:38,184 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:47:38,185 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:47:38,185 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:47:38,185 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:47:38,185 INFO [RS:0;1a15ecfd95f4:45919 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-20T14:47:38,186 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:47:38,186 INFO [RS:0;1a15ecfd95f4:45919 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T14:47:38,186 INFO [RS:1;1a15ecfd95f4:34813 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T14:47:38,186 INFO [RS:1;1a15ecfd95f4:34813 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T14:47:38,186 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:38,186 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(3091): Received CLOSE for 9067098e18e52091553316b207831dfb 2024-11-20T14:47:38,186 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:47:38,186 INFO [RS:1;1a15ecfd95f4:34813 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;1a15ecfd95f4:34813. 2024-11-20T14:47:38,187 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,45919,1732114018559 2024-11-20T14:47:38,187 DEBUG [RS:1;1a15ecfd95f4:34813 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:47:38,187 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:47:38,187 DEBUG [RS:1;1a15ecfd95f4:34813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:38,187 INFO [RS:0;1a15ecfd95f4:45919 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a15ecfd95f4:45919. 2024-11-20T14:47:38,187 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,34813,1732114020050; all regions closed. 
2024-11-20T14:47:38,187 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9067098e18e52091553316b207831dfb, disabling compactions & flushes 2024-11-20T14:47:38,187 DEBUG [RS:0;1a15ecfd95f4:45919 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:47:38,187 DEBUG [RS:0;1a15ecfd95f4:45919 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:38,187 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:38,188 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:38,188 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T14:47:38,188 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:47:38,188 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. after waiting 0 ms 2024-11-20T14:47:38,188 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T14:47:38,188 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 
2024-11-20T14:47:38,188 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T14:47:38,188 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,188 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9067098e18e52091553316b207831dfb 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-20T14:47:38,188 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T14:47:38,188 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9067098e18e52091553316b207831dfb=TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.} 2024-11-20T14:47:38,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,188 DEBUG [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9067098e18e52091553316b207831dfb 2024-11-20T14:47:38,188 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,188 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:47:38,188 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:47:38,188 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:47:38,188 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:47:38,188 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:47:38,189 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-20T14:47:38,189 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,189 ERROR [FSHLog-0-hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3-prefix:1a15ecfd95f4,45919,1732114018559.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,189 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,189 WARN [FSHLog-0-hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3-prefix:1a15ecfd95f4,45919,1732114018559.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,189 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,189 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C45919%2C1732114018559.meta:.meta(num 1732114019807) roll requested 2024-11-20T14:47:38,189 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 2024-11-20T14:47:38,189 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114058189.meta 2024-11-20T14:47:38,190 WARN [IPC Server handler 0 on default port 44451 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 has not been closed. Lease recovery is in progress. 
RecoveryId = 1080 for block blk_1073741837_1013 2024-11-20T14:47:38,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 after 1ms 2024-11-20T14:47:38,193 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/acdc97add46c4246a3fe0b89c56fac7b is 1080, key is row0015/info:/1732114057872/Put/seqid=0 2024-11-20T14:47:38,195 WARN [Thread-1039 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,195 WARN [Thread-1039 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 
2024-11-20T14:47:38,195 WARN [Thread-1039 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741897_1082 2024-11-20T14:47:38,196 WARN [Thread-1039 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:38,196 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,196 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,196 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,197 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,197 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,197 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114058189.meta 2024-11-20T14:47:38,197 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,197 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38809,DS-92885184-7247-45fe-abdb-2be01c84b7e4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,197 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta 2024-11-20T14:47:38,198 WARN [IPC Server handler 0 on default port 44451 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta has not been closed. 
Lease recovery is in progress. RecoveryId = 1084 for block blk_1073741834_1010 2024-11-20T14:47:38,198 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta after 1ms 2024-11-20T14:47:38,203 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40149:40149),(127.0.0.1/127.0.0.1:42103:42103)] 2024-11-20T14:47:38,203 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta is not closed yet, will try archiving it next time 2024-11-20T14:47:38,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741898_1083 (size=14660) 2024-11-20T14:47:38,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741898_1083 (size=14660) 2024-11-20T14:47:38,206 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/acdc97add46c4246a3fe0b89c56fac7b 2024-11-20T14:47:38,213 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/.tmp/info/acdc97add46c4246a3fe0b89c56fac7b as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/acdc97add46c4246a3fe0b89c56fac7b 2024-11-20T14:47:38,220 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/acdc97add46c4246a3fe0b89c56fac7b, entries=9, sequenceid=78, filesize=14.3 K 2024-11-20T14:47:38,220 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/info/540af494cce64127a39dd09fbb766c90 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb./info:regioninfo/1732114020585/Put/seqid=0 2024-11-20T14:47:38,221 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 9067098e18e52091553316b207831dfb in 33ms, sequenceid=78, compaction requested=true 2024-11-20T14:47:38,222 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/3072ce086216446e8598999538029b58, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/5359ff4aa0734d3fb88f7d2dac308e84, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/1b7c680e2afe4bab960f9e3b6be2f0c5, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/670ba78458f247cda482ac7fcb1454cd] to archive 2024-11-20T14:47:38,223 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T14:47:38,223 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:38,223 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 
2024-11-20T14:47:38,223 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741899_1085 2024-11-20T14:47:38,224 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:38,225 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/3072ce086216446e8598999538029b58 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/3072ce086216446e8598999538029b58 2024-11-20T14:47:38,227 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/d4224f66e1d04be1987bc55260c421bc 2024-11-20T14:47:38,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741900_1086 (size=7089) 2024-11-20T14:47:38,229 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/129d0f748692489a94f6189509ae44c6 2024-11-20T14:47:38,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741900_1086 (size=7089) 2024-11-20T14:47:38,229 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/info/540af494cce64127a39dd09fbb766c90 2024-11-20T14:47:38,231 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/5359ff4aa0734d3fb88f7d2dac308e84 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/5359ff4aa0734d3fb88f7d2dac308e84 
2024-11-20T14:47:38,232 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/1b7c680e2afe4bab960f9e3b6be2f0c5 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/1b7c680e2afe4bab960f9e3b6be2f0c5 2024-11-20T14:47:38,233 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/670ba78458f247cda482ac7fcb1454cd to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/info/670ba78458f247cda482ac7fcb1454cd 2024-11-20T14:47:38,234 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a15ecfd95f4:33939 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-20T14:47:38,235 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3072ce086216446e8598999538029b58=10347, d4224f66e1d04be1987bc55260c421bc=12506, 129d0f748692489a94f6189509ae44c6=17994, 5359ff4aa0734d3fb88f7d2dac308e84=6027, 1b7c680e2afe4bab960f9e3b6be2f0c5=6027, 670ba78458f247cda482ac7fcb1454cd=6027] 2024-11-20T14:47:38,243 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/9067098e18e52091553316b207831dfb/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-20T14:47:38,244 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 2024-11-20T14:47:38,244 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9067098e18e52091553316b207831dfb: Waiting for close lock at 1732114058187Running coprocessor pre-close hooks at 1732114058187Disabling compacts and flushes for region at 1732114058187Disabling writes for close at 1732114058188 (+1 ms)Obtaining lock to block concurrent updates at 1732114058188Preparing flush snapshotting stores in 9067098e18e52091553316b207831dfb at 1732114058188Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732114058189 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. at 1732114058189Flushing 9067098e18e52091553316b207831dfb/info: creating writer at 1732114058189Flushing 9067098e18e52091553316b207831dfb/info: appending metadata at 1732114058193 (+4 ms)Flushing 9067098e18e52091553316b207831dfb/info: closing flushed file at 1732114058193Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@580ef12c: reopening flushed file at 1732114058212 (+19 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 9067098e18e52091553316b207831dfb in 33ms, sequenceid=78, compaction requested=true at 1732114058221 (+9 ms)Writing region close event to WAL at 1732114058235 (+14 ms)Running coprocessor post-close hooks at 1732114058244 (+9 ms)Closed at 1732114058244 2024-11-20T14:47:38,245 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732114020203.9067098e18e52091553316b207831dfb. 
2024-11-20T14:47:38,250 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/ns/a1d55b2cec26475694c5c1dac8a1f1a5 is 43, key is default/ns:d/1732114019892/Put/seqid=0 2024-11-20T14:47:38,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741901_1087 (size=5153) 2024-11-20T14:47:38,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741901_1087 (size=5153) 2024-11-20T14:47:38,256 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/ns/a1d55b2cec26475694c5c1dac8a1f1a5 2024-11-20T14:47:38,266 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.1732114041921 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs/1a15ecfd95f4%2C45919%2C1732114018559.1732114041921 2024-11-20T14:47:38,275 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/table/c9fa7ea6154c4fd89090611c86485b1e is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732114020598/Put/seqid=0 2024-11-20T14:47:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741902_1088 (size=5424) 2024-11-20T14:47:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741902_1088 (size=5424) 2024-11-20T14:47:38,281 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/table/c9fa7ea6154c4fd89090611c86485b1e 2024-11-20T14:47:38,287 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/info/540af494cce64127a39dd09fbb766c90 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/info/540af494cce64127a39dd09fbb766c90 2024-11-20T14:47:38,294 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/info/540af494cce64127a39dd09fbb766c90, entries=10, sequenceid=11, filesize=6.9 K 2024-11-20T14:47:38,295 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/ns/a1d55b2cec26475694c5c1dac8a1f1a5 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/ns/a1d55b2cec26475694c5c1dac8a1f1a5 2024-11-20T14:47:38,301 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/ns/a1d55b2cec26475694c5c1dac8a1f1a5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T14:47:38,302 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/.tmp/table/c9fa7ea6154c4fd89090611c86485b1e as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/table/c9fa7ea6154c4fd89090611c86485b1e 2024-11-20T14:47:38,309 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/table/c9fa7ea6154c4fd89090611c86485b1e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T14:47:38,310 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false 2024-11-20T14:47:38,317 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T14:47:38,318 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:47:38,318 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:47:38,318 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114058188Running coprocessor pre-close hooks at 1732114058188Disabling compacts and flushes for region at 1732114058188Disabling writes for close at 1732114058188Obtaining lock to block concurrent updates at 1732114058189 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732114058189Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732114058189Flushing stores of hbase:meta,,1.1588230740 at 1732114058204 (+15 ms)Flushing 1588230740/info: creating writer at 1732114058204Flushing 1588230740/info: appending metadata at 1732114058220 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732114058220Flushing 1588230740/ns: creating writer at 1732114058235 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732114058249 (+14 ms)Flushing 1588230740/ns: closing flushed file at 
1732114058249Flushing 1588230740/table: creating writer at 1732114058262 (+13 ms)Flushing 1588230740/table: appending metadata at 1732114058275 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732114058275Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38a6e93c: reopening flushed file at 1732114058286 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d528397: reopening flushed file at 1732114058294 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1180c35d: reopening flushed file at 1732114058301 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false at 1732114058310 (+9 ms)Writing region close event to WAL at 1732114058312 (+2 ms)Running coprocessor post-close hooks at 1732114058318 (+6 ms)Closed at 1732114058318 2024-11-20T14:47:38,319 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T14:47:38,388 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,45919,1732114018559; all regions closed. 2024-11-20T14:47:38,389 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,389 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,389 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,389 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,389 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:38,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741896_1081 (size=825) 2024-11-20T14:47:38,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741896_1081 (size=825) 2024-11-20T14:47:39,079 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T14:47:39,080 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T14:47:39,081 INFO [regionserver/1a15ecfd95f4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:47:39,258 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T14:47:39,259 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T14:47:39,402 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@60240c46[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46197, datanodeUuid=b7440182-7070-42cd-94ff-3c29bebbf27a, infoPort=42103, infoSecurePort=0, ipcPort=33725, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741878_1061 to 127.0.0.1:42201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:39,586 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f178af0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43357, datanodeUuid=348bdec5-d7c6-496c-b942-4aee15d9f5c5, infoPort=40149, infoSecurePort=0, ipcPort=43043, storageInfo=lv=-57;cid=testClusterID;nsid=954696448;c=1732114016751):Failed to transfer BP-1562597703-172.17.0.2-1732114016751:blk_1073741825_1001 to 127.0.0.1:42201 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:39,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741836_1012 (size=76) 2024-11-20T14:47:39,912 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T14:47:39,912 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-20T14:47:40,169 INFO [regionserver/1a15ecfd95f4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:47:40,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:47:40,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:47:40,598 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-20T14:47:40,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:47:40,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T14:47:42,191 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 after 4002ms 2024-11-20T14:47:42,200 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta after 4002ms 2024-11-20T14:47:43,190 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T14:47:43,194 DEBUG [RS:1;1a15ecfd95f4:34813 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs 2024-11-20T14:47:43,194 INFO [RS:1;1a15ecfd95f4:34813 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C34813%2C1732114020050:(num 1732114020311) 2024-11-20T14:47:43,194 DEBUG [RS:1;1a15ecfd95f4:34813 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:43,194 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:47:43,195 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:47:43,195 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T14:47:43,195 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T14:47:43,196 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:47:43,196 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:47:43,196 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T14:47:43,196 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:47:43,196 INFO [RS:1;1a15ecfd95f4:34813 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34813 2024-11-20T14:47:43,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,34813,1732114020050 2024-11-20T14:47:43,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:47:43,225 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:47:43,235 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,34813,1732114020050] 2024-11-20T14:47:43,244 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,34813,1732114020050 already deleted, retry=false 2024-11-20T14:47:43,244 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,34813,1732114020050 expired; onlineServers=1 2024-11-20T14:47:43,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:47:43,336 INFO [RS:1;1a15ecfd95f4:34813 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:47:43,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34813-0x1015a00031e0002, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:47:43,336 INFO [RS:1;1a15ecfd95f4:34813 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,34813,1732114020050; zookeeper connection closed. 2024-11-20T14:47:43,336 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10bb86a8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10bb86a8 2024-11-20T14:47:43,390 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-20T14:47:43,396 DEBUG [RS:0;1a15ecfd95f4:45919 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs 2024-11-20T14:47:43,396 INFO [RS:0;1a15ecfd95f4:45919 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C45919%2C1732114018559.meta:.meta(num 1732114058189) 2024-11-20T14:47:43,397 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:43,397 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:43,398 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:43,398 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:43,398 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:43,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741893_1077 (size=14682) 2024-11-20T14:47:43,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741893_1077 (size=14682) 2024-11-20T14:47:43,778 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:47:43,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:43,832 DEBUG [RS:0;1a15ecfd95f4:45919 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs 2024-11-20T14:47:43,833 INFO [RS:0;1a15ecfd95f4:45919 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C45919%2C1732114018559:(num 1732114057849) 2024-11-20T14:47:43,833 DEBUG [RS:0;1a15ecfd95f4:45919 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:43,833 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:47:43,833 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:47:43,833 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-20T14:47:43,833 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:47:43,833 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:47:43,833 INFO [RS:0;1a15ecfd95f4:45919 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45919 2024-11-20T14:47:43,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:47:43,868 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,45919,1732114018559 2024-11-20T14:47:43,868 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:47:43,876 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,45919,1732114018559] 2024-11-20T14:47:43,885 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,45919,1732114018559 already deleted, retry=false 2024-11-20T14:47:43,885 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,45919,1732114018559 expired; onlineServers=0 2024-11-20T14:47:43,885 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a15ecfd95f4,33939,1732114018411' ***** 2024-11-20T14:47:43,885 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T14:47:43,885 INFO [M:0;1a15ecfd95f4:33939 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:47:43,885 INFO [M:0;1a15ecfd95f4:33939 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:47:43,885 DEBUG [M:0;1a15ecfd95f4:33939 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T14:47:43,885 DEBUG [M:0;1a15ecfd95f4:33939 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T14:47:43,885 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114018972 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114018972,5,FailOnTimeoutGroup] 2024-11-20T14:47:43,885 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T14:47:43,885 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114018970 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114018970,5,FailOnTimeoutGroup] 2024-11-20T14:47:43,886 INFO [M:0;1a15ecfd95f4:33939 {}] hbase.ChoreService(370): Chore service for: master/1a15ecfd95f4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T14:47:43,886 INFO [M:0;1a15ecfd95f4:33939 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:47:43,886 DEBUG [M:0;1a15ecfd95f4:33939 {}] master.HMaster(1795): Stopping service threads 2024-11-20T14:47:43,886 INFO [M:0;1a15ecfd95f4:33939 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T14:47:43,886 INFO [M:0;1a15ecfd95f4:33939 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:47:43,886 INFO [M:0;1a15ecfd95f4:33939 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T14:47:43,886 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T14:47:43,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T14:47:43,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:43,893 DEBUG [M:0;1a15ecfd95f4:33939 {}] zookeeper.ZKUtil(347): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T14:47:43,893 WARN [M:0;1a15ecfd95f4:33939 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T14:47:43,894 INFO [M:0;1a15ecfd95f4:33939 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/.lastflushedseqids 2024-11-20T14:47:43,898 WARN [Thread-1075 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:47:43,898 WARN [Thread-1075 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK], DatanodeInfoWithStorage[127.0.0.1:43357,DS-d454c320-7338-4438-87b7-611a2cf8c846,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:43,898 WARN [Thread-1075 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741903_1089 2024-11-20T14:47:43,899 WARN [Thread-1075 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:43,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741904_1090 (size=130) 2024-11-20T14:47:43,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741904_1090 (size=130) 2024-11-20T14:47:43,907 INFO [M:0;1a15ecfd95f4:33939 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T14:47:43,907 INFO [M:0;1a15ecfd95f4:33939 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T14:47:43,907 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:47:43,908 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:47:43,908 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:47:43,908 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:47:43,908 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T14:47:43,908 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-20T14:47:43,927 DEBUG [M:0;1a15ecfd95f4:33939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/488f7ceba27041f1a1812bcfc2b29d3b is 82, key is hbase:meta,,1/info:regioninfo/1732114019837/Put/seqid=0 2024-11-20T14:47:43,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741905_1091 (size=5672) 2024-11-20T14:47:43,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741905_1091 (size=5672) 2024-11-20T14:47:43,932 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/488f7ceba27041f1a1812bcfc2b29d3b 2024-11-20T14:47:43,952 DEBUG [M:0;1a15ecfd95f4:33939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3911a9c5215f400592aabca6de7e225e is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732114020605/Put/seqid=0 2024-11-20T14:47:43,955 WARN [Thread-1088 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42201 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:47:43,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:44884 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741906_1092] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6]'}, localName='127.0.0.1:46197', datanodeUuid='b7440182-7070-42cd-94ff-3c29bebbf27a', xmitsInProgress=0}:Exception transferring block BP-1562597703-172.17.0.2-1732114016751:blk_1073741906_1092 to mirror 127.0.0.1:42201 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:43,955 WARN [Thread-1088 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1562597703-172.17.0.2-1732114016751:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46197,DS-59ef332b-58fa-4e54-ad52-bed331d4887e,DISK], DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK]) is bad. 2024-11-20T14:47:43,955 WARN [Thread-1088 {}] hdfs.DataStreamer(1850): Abandoning BP-1562597703-172.17.0.2-1732114016751:blk_1073741906_1092 2024-11-20T14:47:43,955 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:44884 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741906_1092] {}] datanode.BlockReceiver(316): Block 1073741906 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-20T14:47:43,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1803393000_22 at /127.0.0.1:44884 [Receiving block BP-1562597703-172.17.0.2-1732114016751:blk_1073741906_1092] {}] datanode.DataXceiver(331): 127.0.0.1:46197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44884 dst: /127.0.0.1:46197 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:43,955 WARN [Thread-1088 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42201,DS-f40501a3-e6c9-478e-a1c2-8d46a54b387b,DISK] 2024-11-20T14:47:43,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741907_1093 (size=6256) 2024-11-20T14:47:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741907_1093 (size=6256) 2024-11-20T14:47:43,960 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3911a9c5215f400592aabca6de7e225e 2024-11-20T14:47:43,965 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3911a9c5215f400592aabca6de7e225e 2024-11-20T14:47:43,976 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:47:43,976 INFO [RS:0;1a15ecfd95f4:45919 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:47:43,977 INFO [RS:0;1a15ecfd95f4:45919 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,45919,1732114018559; zookeeper connection closed. 2024-11-20T14:47:43,977 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45919-0x1015a00031e0001, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:47:43,977 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@355be7ab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@355be7ab 2024-11-20T14:47:43,977 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-20T14:47:43,979 DEBUG [M:0;1a15ecfd95f4:33939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69da221433e24905aebcdf4ad14994fb is 69, key is 1a15ecfd95f4,34813,1732114020050/rs:state/1732114020133/Put/seqid=0 2024-11-20T14:47:43,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741908_1094 (size=5224) 2024-11-20T14:47:43,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741908_1094 (size=5224) 2024-11-20T14:47:43,985 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69da221433e24905aebcdf4ad14994fb 2024-11-20T14:47:44,010 DEBUG [M:0;1a15ecfd95f4:33939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c5b827733d0d4cbd8a23534b3104e064 is 52, key is load_balancer_on/state:d/1732114020031/Put/seqid=0 2024-11-20T14:47:44,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741909_1095 (size=5056) 2024-11-20T14:47:44,017 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c5b827733d0d4cbd8a23534b3104e064 2024-11-20T14:47:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741909_1095 (size=5056) 2024-11-20T14:47:44,023 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/488f7ceba27041f1a1812bcfc2b29d3b as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/488f7ceba27041f1a1812bcfc2b29d3b 2024-11-20T14:47:44,030 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/488f7ceba27041f1a1812bcfc2b29d3b, entries=8, sequenceid=60, filesize=5.5 K 2024-11-20T14:47:44,031 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3911a9c5215f400592aabca6de7e225e as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3911a9c5215f400592aabca6de7e225e 2024-11-20T14:47:44,037 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3911a9c5215f400592aabca6de7e225e 2024-11-20T14:47:44,037 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3911a9c5215f400592aabca6de7e225e, entries=6, sequenceid=60, filesize=6.1 K 2024-11-20T14:47:44,038 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69da221433e24905aebcdf4ad14994fb as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/69da221433e24905aebcdf4ad14994fb 2024-11-20T14:47:44,043 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/69da221433e24905aebcdf4ad14994fb, entries=2, 
sequenceid=60, filesize=5.1 K 2024-11-20T14:47:44,044 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c5b827733d0d4cbd8a23534b3104e064 as hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c5b827733d0d4cbd8a23534b3104e064 2024-11-20T14:47:44,050 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c5b827733d0d4cbd8a23534b3104e064, entries=1, sequenceid=60, filesize=4.9 K 2024-11-20T14:47:44,051 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false 2024-11-20T14:47:44,052 INFO [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:47:44,052 DEBUG [M:0;1a15ecfd95f4:33939 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114063907Disabling compacts and flushes for region at 1732114063907Disabling writes for close at 1732114063908 (+1 ms)Obtaining lock to block concurrent updates at 1732114063908Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732114063908Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732114063908Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732114063909 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732114063909Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732114063926 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732114063926Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732114063937 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732114063951 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732114063951Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732114063965 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732114063979 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732114063979Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732114063990 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732114064009 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732114064009Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c5eaf4e: reopening flushed file at 1732114064022 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@272e4037: reopening flushed file at 1732114064030 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a06e02b: reopening flushed file at 1732114064037 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e2bd5a1: reopening flushed file at 1732114064043 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false at 1732114064051 (+8 ms)Writing region close event to WAL at 1732114064052 (+1 ms)Closed at 1732114064052 2024-11-20T14:47:44,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:44,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:44,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:44,053 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:44,053 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:47:44,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46197 is added to blk_1073741892_1075 (size=1045) 2024-11-20T14:47:44,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43357 is added to blk_1073741892_1075 (size=1045) 2024-11-20T14:47:44,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:44,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:44,603 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@47b99e9d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:38809,null,null]) java.net.ConnectException: Call From 1a15ecfd95f4/172.17.0.2 to localhost:39089 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T14:47:44,993 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/WALs/1a15ecfd95f4,33939,1732114018411/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/oldWALs/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 2024-11-20T14:47:44,997 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/MasterData/oldWALs/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717 to hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/oldWALs/1a15ecfd95f4%2C33939%2C1732114018411.1732114018717$masterlocalwal$ 2024-11-20T14:47:44,997 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T14:47:44,997 INFO [M:0;1a15ecfd95f4:33939 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-20T14:47:44,997 INFO [M:0;1a15ecfd95f4:33939 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33939 2024-11-20T14:47:44,998 INFO [M:0;1a15ecfd95f4:33939 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:47:45,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:47:45,143 INFO [M:0;1a15ecfd95f4:33939 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:47:45,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33939-0x1015a00031e0000, quorum=127.0.0.1:52514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:47:45,148 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3aa0d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:45,149 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f0c7751{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:47:45,149 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:47:45,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ddd152f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:47:45,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22bae5a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,STOPPED} 2024-11-20T14:47:45,151 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:47:45,151 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:47:45,152 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1562597703-172.17.0.2-1732114016751 (Datanode Uuid 348bdec5-d7c6-496c-b942-4aee15d9f5c5) service to localhost/127.0.0.1:44451 2024-11-20T14:47:45,152 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:47:45,151 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@65522452 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38809,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39089 , LocalHost:localPort 1a15ecfd95f4/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-20T14:47:45,152 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@65522452 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:43357,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1562597703-172.17.0.2-1732114016751 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:45,152 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@65522452 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38809,null,null], DatanodeInfoWithStorage[127.0.0.1:43357,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38809,null,null], DatanodeInfoWithStorage[127.0.0.1:43357,null,null]] 2024-11-20T14:47:45,153 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@65522452 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43357,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1562597703-172.17.0.2-1732114016751 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:47:45,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data3/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:45,153 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@65522452 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38809,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1562597703-172.17.0.2-1732114016751 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:47:45,153 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@65522452 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43357,null,null], DatanodeInfoWithStorage[127.0.0.1:38809,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1562597703-172.17.0.2-1732114016751:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43357,null,null], DatanodeInfoWithStorage[127.0.0.1:38809,null,null]] 2024-11-20T14:47:45,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data4/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:45,153 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:47:45,159 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@560cf715{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:45,160 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d0c94cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:47:45,160 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:47:45,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b135604{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:47:45,160 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@142b4537{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,STOPPED} 2024-11-20T14:47:45,162 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:47:45,162 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:47:45,162 WARN [BP-1562597703-172.17.0.2-1732114016751 heartbeating to localhost/127.0.0.1:44451 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1562597703-172.17.0.2-1732114016751 (Datanode Uuid b7440182-7070-42cd-94ff-3c29bebbf27a) service to localhost/127.0.0.1:44451 2024-11-20T14:47:45,162 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:47:45,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data5/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:45,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/cluster_5f416566-e55a-64bc-eacb-606fe09918b8/data/data6/current/BP-1562597703-172.17.0.2-1732114016751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:47:45,163 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:47:45,169 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24e73295{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:47:45,170 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@226d09c5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:47:45,170 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:47:45,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c22ffb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:47:45,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dda769c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir/,STOPPED} 2024-11-20T14:47:45,179 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T14:47:45,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:45,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:45,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T14:47:45,225 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34345 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:44451 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f4548bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:44451 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:44451 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44451 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:44451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:44451 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f4548bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:34345 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44451 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44451 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=241 (was 304), ProcessCount=11 (was 11), AvailableMemoryMB=10135 (was 10999)
2024-11-20T14:47:45,232 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=241, ProcessCount=11, AvailableMemoryMB=10135
2024-11-20T14:47:45,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-20T14:47:45,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.log.dir so I do NOT create it in target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3
2024-11-20T14:47:45,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/469f8025-c225-1aec-81f1-0a056fa16e1e/hadoop.tmp.dir so I do NOT create it in target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3
2024-11-20T14:47:45,232 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf, deleteOnExit=true
2024-11-20T14:47:45,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/test.cache.data in system properties and HBase conf
2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.tmp.dir in system properties and HBase conf
2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir in system properties and HBase conf
2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-20T14:47:45,233 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a
DistributedFileSystem. Skipping on block location reordering 2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:47:45,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/nfs.dump.dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T14:47:45,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T14:47:45,246 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:47:45,532 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:45,538 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:47:45,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:47:45,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:47:45,547 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:47:45,550 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:45,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c8242e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:47:45,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2475ffae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:47:45,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@13b0bc33{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir/jetty-localhost-43553-hadoop-hdfs-3_4_1-tests_jar-_-any-8207205130377634014/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:47:45,665 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5af08dcb{HTTP/1.1, (http/1.1)}{localhost:43553} 2024-11-20T14:47:45,665 INFO [Time-limited test {}] server.Server(415): Started @150864ms 2024-11-20T14:47:45,676 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:47:45,882 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:45,885 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:47:45,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:47:45,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:47:45,887 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:47:45,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b6170a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:47:45,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@dbb4bbe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:47:45,987 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4da1d10a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir/jetty-localhost-37127-hadoop-hdfs-3_4_1-tests_jar-_-any-8242681237329020634/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:45,987 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39960015{HTTP/1.1, (http/1.1)}{localhost:37127} 2024-11-20T14:47:45,988 INFO [Time-limited test {}] server.Server(415): Started @151187ms 2024-11-20T14:47:45,990 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:47:46,025 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:47:46,030 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:47:46,031 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:47:46,031 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:47:46,031 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:47:46,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b065be7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:47:46,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2572d67e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:47:46,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c3bdc88{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir/jetty-localhost-37105-hadoop-hdfs-3_4_1-tests_jar-_-any-2829465246612777568/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:47:46,128 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d408466{HTTP/1.1, (http/1.1)}{localhost:37105} 2024-11-20T14:47:46,128 INFO [Time-limited test {}] server.Server(415): Started @151327ms 2024-11-20T14:47:46,130 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:47:46,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:46,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:47:46,820 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data1/current/BP-1527237828-172.17.0.2-1732114065257/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:46,820 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data2/current/BP-1527237828-172.17.0.2-1732114065257/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:46,840 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:47:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbce57659aac5e7a9 with lease ID 0xac87034e3900051b: Processing first storage report for DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f from datanode DatanodeRegistration(127.0.0.1:37477, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=46465, infoSecurePort=0, ipcPort=43405, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257) 2024-11-20T14:47:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbce57659aac5e7a9 with lease ID 0xac87034e3900051b: from storage DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f node DatanodeRegistration(127.0.0.1:37477, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=46465, infoSecurePort=0, ipcPort=43405, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbce57659aac5e7a9 with lease ID 0xac87034e3900051b: Processing first storage report for DS-11069ab3-4128-4678-8188-97db61721a1f from datanode DatanodeRegistration(127.0.0.1:37477, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=46465, infoSecurePort=0, ipcPort=43405, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257) 2024-11-20T14:47:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbce57659aac5e7a9 with lease ID 0xac87034e3900051b: from storage DS-11069ab3-4128-4678-8188-97db61721a1f node DatanodeRegistration(127.0.0.1:37477, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=46465, infoSecurePort=0, ipcPort=43405, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:46,971 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data3/current/BP-1527237828-172.17.0.2-1732114065257/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:46,971 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data4/current/BP-1527237828-172.17.0.2-1732114065257/current, will proceed with Du for space computation calculation, 2024-11-20T14:47:46,992 WARN [Thread-1184 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:47:46,994 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b70428cca630e12 with lease ID 0xac87034e3900051c: Processing first storage report for DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7 from datanode DatanodeRegistration(127.0.0.1:46673, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=39291, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257) 2024-11-20T14:47:46,994 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b70428cca630e12 with lease ID 0xac87034e3900051c: from storage DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7 node DatanodeRegistration(127.0.0.1:46673, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=39291, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:46,994 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b70428cca630e12 with lease ID 0xac87034e3900051c: Processing first storage report for DS-d5db250a-9e67-4f38-a1c0-4042b43d9fc3 from datanode DatanodeRegistration(127.0.0.1:46673, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=39291, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257) 2024-11-20T14:47:46,994 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b70428cca630e12 with lease ID 0xac87034e3900051c: from storage DS-d5db250a-9e67-4f38-a1c0-4042b43d9fc3 node DatanodeRegistration(127.0.0.1:46673, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=39291, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:47:47,063 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3 2024-11-20T14:47:47,066 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/zookeeper_0, clientPort=55126, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T14:47:47,067 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55126 2024-11-20T14:47:47,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:47,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:47,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:47:47,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:47:47,078 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48 with version=8 2024-11-20T14:47:47,078 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase-staging 2024-11-20T14:47:47,080 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:47:47,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:47,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:47,080 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:47:47,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:47,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:47:47,080 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T14:47:47,080 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:47:47,081 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39003 2024-11-20T14:47:47,082 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39003 connecting to ZooKeeper ensemble=127.0.0.1:55126 2024-11-20T14:47:47,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:390030x0, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-20T14:47:47,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39003-0x1015a00c13e0000 connected 2024-11-20T14:47:47,184 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:47,186 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:47,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:47:47,190 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48, hbase.cluster.distributed=false 2024-11-20T14:47:47,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:47:47,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39003 2024-11-20T14:47:47,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39003 2024-11-20T14:47:47,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39003 2024-11-20T14:47:47,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39003 2024-11-20T14:47:47,196 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39003 2024-11-20T14:47:47,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:47,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:47:47,214 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:47:47,214 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:47,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:47,215 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:47:47,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:47:47,215 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:47:47,215 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:47:47,215 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:47:47,215 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32987 2024-11-20T14:47:47,217 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32987 connecting to ZooKeeper ensemble=127.0.0.1:55126 2024-11-20T14:47:47,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:47,219 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:47,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329870x0, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:47:47,226 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32987-0x1015a00c13e0001 connected 2024-11-20T14:47:47,226 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:47:47,226 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:47:47,227 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:47:47,228 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T14:47:47,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:47:47,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32987 2024-11-20T14:47:47,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32987 2024-11-20T14:47:47,229 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32987 2024-11-20T14:47:47,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32987 2024-11-20T14:47:47,230 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32987 2024-11-20T14:47:47,239 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a15ecfd95f4:39003 2024-11-20T14:47:47,240 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:47,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:47:47,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:47:47,251 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:47,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T14:47:47,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,260 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:47:47,260 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a15ecfd95f4,39003,1732114067079 from backup master directory 2024-11-20T14:47:47,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:47,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:47:47,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:47:47,267 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:47:47,267 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:47,274 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/hbase.id] with ID: 7d52400b-b140-4788-9339-94dfcc4cb33f 2024-11-20T14:47:47,274 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/.tmp/hbase.id 2024-11-20T14:47:47,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:47:47,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:47:47,284 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/.tmp/hbase.id]:[hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/hbase.id] 2024-11-20T14:47:47,301 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:47,301 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T14:47:47,304 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
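The FSUtils entries above (create the cluster ID file, write it to a temporary location, then move it to its target) describe a write-then-rename pattern on HDFS. The sketch below only illustrates that pattern with the public Hadoop FileSystem API; the class name, method name, and error handling are assumptions, not the actual FSUtils implementation.

  import java.io.IOException;
  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // Hedged sketch of the write-temp-then-rename pattern the log entries describe.
  final class ClusterIdFileSketch {
    static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
      Path target = new Path(rootDir, "hbase.id");
      Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
      // Write the ID to the temporary location first.
      try (FSDataOutputStream out = fs.create(tmp, true)) {
        out.write(clusterId.getBytes(StandardCharsets.UTF_8));
      }
      // Then move it into place so readers never observe a partially written file.
      if (!fs.rename(tmp, target)) {
        throw new IOException("Failed to rename " + tmp + " to " + target);
      }
    }
  }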
2024-11-20T14:47:47,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:47:47,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:47:47,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:47:47,328 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T14:47:47,329 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:47:47,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:47:47,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:47:47,337 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store 2024-11-20T14:47:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:47:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:47:47,345 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:47,345 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:47:47,345 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:47:47,345 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:47:47,345 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:47:47,345 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:47:47,345 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
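The MasterRegion entry above prints the full 'master:store' descriptor with its info/proc/rs/state column families. As a hedged illustration only (not the MasterRegion code itself), a descriptor of that shape can be assembled through the public HBase client builders; the sketch covers the first two families, with attribute values copied from the log entry.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  // Illustrative sketch of a 'master:store'-like descriptor built via the client API.
  final class MasterStoreDescriptorSketch {
    static TableDescriptor build() {
      return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)                                    // VERSIONS => '3'
              .setInMemory(true)                                    // IN_MEMORY => 'true'
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
              .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
              .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
              .build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
              .setMaxVersions(1)                                    // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
              .build())
          .build();
    }
  }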
2024-11-20T14:47:47,345 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114067345Disabling compacts and flushes for region at 1732114067345Disabling writes for close at 1732114067345Writing region close event to WAL at 1732114067345Closed at 1732114067345 2024-11-20T14:47:47,346 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/.initializing 2024-11-20T14:47:47,346 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:47,348 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C39003%2C1732114067079, suffix=, logDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079, archiveDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/oldWALs, maxLogs=10 2024-11-20T14:47:47,348 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 2024-11-20T14:47:47,353 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 2024-11-20T14:47:47,354 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39291:39291),(127.0.0.1/127.0.0.1:46465:46465)] 2024-11-20T14:47:47,354 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:47:47,354 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:47,354 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,354 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T14:47:47,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:47,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T14:47:47,361 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:47:47,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T14:47:47,362 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:47:47,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T14:47:47,364 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:47:47,365 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,365 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,365 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,367 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,367 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,367 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T14:47:47,369 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:47:47,370 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:47:47,371 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758528, jitterRate=-0.0354820191860199}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T14:47:47,371 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732114067355Initializing all the Stores at 1732114067355Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114067355Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114067358 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114067358Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114067358Cleaning up temporary data from old regions at 1732114067367 (+9 ms)Region opened successfully at 1732114067371 (+4 ms) 2024-11-20T14:47:47,372 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T14:47:47,375 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42210a2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:47:47,376 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T14:47:47,376 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T14:47:47,376 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T14:47:47,376 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T14:47:47,377 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T14:47:47,377 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T14:47:47,377 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T14:47:47,380 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T14:47:47,381 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T14:47:47,409 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T14:47:47,409 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T14:47:47,410 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T14:47:47,417 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T14:47:47,418 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T14:47:47,418 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T14:47:47,425 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T14:47:47,426 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T14:47:47,434 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T14:47:47,436 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T14:47:47,442 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T14:47:47,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:47:47,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:47:47,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,451 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a15ecfd95f4,39003,1732114067079, sessionid=0x1015a00c13e0000, setting cluster-up flag (Was=false) 2024-11-20T14:47:47,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,492 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T14:47:47,494 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:47,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:47,534 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T14:47:47,535 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:47,536 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T14:47:47,538 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T14:47:47,538 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T14:47:47,538 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T14:47:47,538 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a15ecfd95f4,39003,1732114067079 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a15ecfd95f4:0, corePoolSize=10, maxPoolSize=10 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:47:47,540 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:47:47,541 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732114097541 2024-11-20T14:47:47,541 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T14:47:47,542 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T14:47:47,542 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T14:47:47,542 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T14:47:47,542 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T14:47:47,542 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T14:47:47,542 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,542 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:47:47,542 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T14:47:47,542 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T14:47:47,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T14:47:47,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T14:47:47,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T14:47:47,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T14:47:47,543 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114067543,5,FailOnTimeoutGroup] 2024-11-20T14:47:47,543 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114067543,5,FailOnTimeoutGroup] 2024-11-20T14:47:47,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T14:47:47,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,544 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,544 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,544 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T14:47:47,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:47:47,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:47:47,553 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T14:47:47,553 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48 2024-11-20T14:47:47,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:47:47,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:47:47,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:47,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:47:47,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:47:47,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:47,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:47:47,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:47:47,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:47,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:47:47,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:47:47,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:47,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:47:47,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:47:47,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:47,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:47,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:47:47,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740 2024-11-20T14:47:47,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740 2024-11-20T14:47:47,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:47:47,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:47:47,573 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
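The FlushLargeStoresPolicy entry above records the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table: per the message, the per-family lower bound becomes the region's memstore flush size divided by the number of column families, which for the four families of hbase:meta here (info, ns, rep_barrier, table) works out to the 16.0 M shown, and matches the flushSizeLowerBound=16777216 reported a few entries later. A minimal sketch of that arithmetic follows; the 64 MB flush size is an assumed figure implied by the result, not a value stated in this log.

// Illustrative arithmetic only; the 64 MB figure is an assumption inferred from the logged result.
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long regionMemStoreFlushSize = 64L * 1024 * 1024; // assumed memstore flush size for hbase:meta
        int numColumnFamilies = 4;                        // info, ns, rep_barrier, table
        long perFamilyLowerBound = regionMemStoreFlushSize / numColumnFamilies;
        System.out.println(perFamilyLowerBound);          // 16777216 bytes = 16.0 M, as logged above
    }
}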
2024-11-20T14:47:47,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:47:47,576 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:47:47,576 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866195, jitterRate=0.10142485797405243}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:47:47,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732114067560Initializing all the Stores at 1732114067561 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114067561Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114067562 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114067562Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114067562Cleaning up temporary data from old regions at 1732114067572 (+10 ms)Region opened successfully at 1732114067576 (+4 ms) 2024-11-20T14:47:47,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:47:47,577 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:47:47,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:47:47,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:47:47,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:47:47,577 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:47:47,577 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114067577Disabling compacts and flushes for region at 1732114067577Disabling writes for close at 1732114067577Writing 
region close event to WAL at 1732114067577Closed at 1732114067577 2024-11-20T14:47:47,578 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:47:47,578 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T14:47:47,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T14:47:47,580 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:47:47,581 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T14:47:47,633 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(746): ClusterId : 7d52400b-b140-4788-9339-94dfcc4cb33f 2024-11-20T14:47:47,633 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:47:47,650 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:47:47,650 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:47:47,661 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:47:47,661 DEBUG [RS:0;1a15ecfd95f4:32987 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@596ec161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:47:47,677 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a15ecfd95f4:32987 2024-11-20T14:47:47,677 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:47:47,677 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:47:47,677 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T14:47:47,678 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,39003,1732114067079 with port=32987, startcode=1732114067214 2024-11-20T14:47:47,678 DEBUG [RS:0;1a15ecfd95f4:32987 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:47:47,680 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41983, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:47:47,680 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39003 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:47,681 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39003 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:47,682 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48 2024-11-20T14:47:47,682 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44871 2024-11-20T14:47:47,682 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:47:47,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:47:47,693 DEBUG [RS:0;1a15ecfd95f4:32987 {}] zookeeper.ZKUtil(111): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:47,693 WARN [RS:0;1a15ecfd95f4:32987 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:47:47,693 INFO [RS:0;1a15ecfd95f4:32987 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:47:47,693 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:47,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,32987,1732114067214] 2024-11-20T14:47:47,697 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:47:47,698 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:47:47,699 INFO [RS:0;1a15ecfd95f4:32987 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:47:47,699 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
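The PressureAwareCompactionThroughputController entry above shows the throughput window in force for this region server (lower bound 50 MB/s, higher bound 100 MB/s, retuned every 60000 ms). A test or deployment needing different limits would normally set them on the Configuration before the server starts; the sketch below uses the stock HBase key names for the two bounds, with values picked purely for illustration and not taken from this run.

// Hedged sketch: overriding the compaction throughput bounds seen in the log above.
// The key names are the standard HBase ones; the 200/100 MB/s values are arbitrary examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 200L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 100L * 1024 * 1024);
        // A region server started with this conf would log the new bounds in the same entry format as above.
    }
}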
2024-11-20T14:47:47,699 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:47:47,700 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:47:47,700 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,700 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,701 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,701 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:47:47,701 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:47:47,701 DEBUG [RS:0;1a15ecfd95f4:32987 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:47:47,701 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T14:47:47,701 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,701 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,701 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,701 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,701 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,32987,1732114067214-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:47:47,715 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:47:47,715 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,32987,1732114067214-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,715 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,715 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.Replication(171): 1a15ecfd95f4,32987,1732114067214 started 2024-11-20T14:47:47,728 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:47,728 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,32987,1732114067214, RpcServer on 1a15ecfd95f4/172.17.0.2:32987, sessionid=0x1015a00c13e0001 2024-11-20T14:47:47,728 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:47:47,728 DEBUG [RS:0;1a15ecfd95f4:32987 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:47,728 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,32987,1732114067214' 2024-11-20T14:47:47,728 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:47:47,729 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:47:47,729 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:47:47,729 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:47:47,729 DEBUG [RS:0;1a15ecfd95f4:32987 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:47,729 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,32987,1732114067214' 2024-11-20T14:47:47,729 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:47:47,729 DEBUG 
[RS:0;1a15ecfd95f4:32987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:47:47,730 DEBUG [RS:0;1a15ecfd95f4:32987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:47:47,730 INFO [RS:0;1a15ecfd95f4:32987 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:47:47,730 INFO [RS:0;1a15ecfd95f4:32987 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T14:47:47,731 WARN [1a15ecfd95f4:39003 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-20T14:47:47,832 INFO [RS:0;1a15ecfd95f4:32987 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C32987%2C1732114067214, suffix=, logDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214, archiveDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/oldWALs, maxLogs=32 2024-11-20T14:47:47,833 INFO [RS:0;1a15ecfd95f4:32987 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:47:47,841 INFO [RS:0;1a15ecfd95f4:32987 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:47:47,843 DEBUG [RS:0;1a15ecfd95f4:32987 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46465:46465),(127.0.0.1/127.0.0.1:39291:39291)] 2024-11-20T14:47:47,981 DEBUG [1a15ecfd95f4:39003 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T14:47:47,982 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:47,984 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,32987,1732114067214, state=OPENING 2024-11-20T14:47:48,007 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T14:47:48,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:48,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:47:48,018 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:47:48,018 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:47:48,019 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:47:48,019 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,32987,1732114067214}] 2024-11-20T14:47:48,176 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T14:47:48,179 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34173, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T14:47:48,185 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T14:47:48,185 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:47:48,189 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C32987%2C1732114067214.meta, suffix=.meta, logDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214, archiveDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/oldWALs, maxLogs=32 2024-11-20T14:47:48,190 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta 2024-11-20T14:47:48,196 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta 2024-11-20T14:47:48,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:48,203 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39291:39291),(127.0.0.1/127.0.0.1:46465:46465)] 2024-11-20T14:47:48,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:47:48,207 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:47:48,207 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T14:47:48,207 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T14:47:48,207 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T14:47:48,207 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T14:47:48,208 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:48,208 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T14:47:48,208 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T14:47:48,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:47:48,210 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:47:48,210 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:48,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:48,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:47:48,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:47:48,212 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:48,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:48,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:47:48,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:47:48,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:48,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:48,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:47:48,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:47:48,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:48,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:47:48,214 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:47:48,215 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740 2024-11-20T14:47:48,216 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740 2024-11-20T14:47:48,217 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:47:48,217 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:47:48,217 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T14:47:48,219 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:47:48,220 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882459, jitterRate=0.12210521101951599}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:47:48,220 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T14:47:48,220 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732114068208Writing region info on filesystem at 1732114068208Initializing all the Stores at 1732114068209 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114068209Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114068209Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114068209Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114068209Cleaning up temporary data from old regions at 1732114068217 (+8 ms)Running coprocessor post-open hooks at 1732114068220 (+3 ms)Region opened successfully at 1732114068220 2024-11-20T14:47:48,221 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732114068175 2024-11-20T14:47:48,224 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T14:47:48,224 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T14:47:48,225 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:48,226 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,32987,1732114067214, state=OPEN 2024-11-20T14:47:48,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:47:48,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:47:48,294 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:48,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:47:48,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:47:48,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T14:47:48,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,32987,1732114067214 in 276 msec 2024-11-20T14:47:48,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T14:47:48,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 725 msec 2024-11-20T14:47:48,308 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:47:48,308 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T14:47:48,310 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:47:48,310 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,32987,1732114067214, seqNum=-1] 2024-11-20T14:47:48,311 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:47:48,312 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36807, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:47:48,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 780 msec 2024-11-20T14:47:48,320 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732114068320, completionTime=-1 2024-11-20T14:47:48,320 INFO 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T14:47:48,320 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-20T14:47:48,321 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-20T14:47:48,321 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732114128321 2024-11-20T14:47:48,321 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732114188321 2024-11-20T14:47:48,321 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-20T14:47:48,322 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,39003,1732114067079-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:48,322 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,39003,1732114067079-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:48,322 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,39003,1732114067079-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:48,322 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a15ecfd95f4:39003, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:48,322 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:48,322 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:48,324 DEBUG [master/1a15ecfd95f4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.058sec 2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,39003,1732114067079-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:47:48,326 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,39003,1732114067079-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T14:47:48,328 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T14:47:48,328 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T14:47:48,328 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,39003,1732114067079-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:47:48,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b2afdb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:47:48,332 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a15ecfd95f4,39003,-1 for getting cluster id 2024-11-20T14:47:48,333 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T14:47:48,334 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7d52400b-b140-4788-9339-94dfcc4cb33f' 2024-11-20T14:47:48,335 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T14:47:48,335 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7d52400b-b140-4788-9339-94dfcc4cb33f" 2024-11-20T14:47:48,335 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64156326, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:47:48,335 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a15ecfd95f4,39003,-1] 2024-11-20T14:47:48,335 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T14:47:48,335 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:47:48,337 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39684, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T14:47:48,338 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e2ba616, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:47:48,338 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:47:48,339 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,32987,1732114067214, seqNum=-1] 2024-11-20T14:47:48,340 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:47:48,341 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:47:48,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:48,344 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:47:48,347 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T14:47:48,347 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-20T14:47:48,347 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-20T14:47:48,347 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T14:47:48,348 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a15ecfd95f4,39003,1732114067079 2024-11-20T14:47:48,348 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1159de1d 2024-11-20T14:47:48,348 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T14:47:48,350 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39698, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T14:47:48,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39003 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T14:47:48,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39003 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
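The two TableDescriptorChecker warnings above are expected here: the test requests a table whose MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) sit far below production defaults, so that flushes and log rolls happen quickly. As a hedged sketch using the public HBase client builder API (this is illustrative, not the test's actual code), such a descriptor would look roughly like the following.

// Sketch of a table descriptor carrying the deliberately small sizes warned about above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallSizesDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)       // matches VERSIONS => '1' in the create request logged below
                .build())
            .setMaxFileSize(786432L)     // triggers the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L) // triggers the MEMSTORE_FLUSHSIZE warning
            .build();
    }
}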
2024-11-20T14:47:48,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39003 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:47:48,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39003 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T14:47:48,353 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T14:47:48,354 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:48,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39003 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-20T14:47:48,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39003 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:47:48,355 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T14:47:48,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741835_1011 (size=395) 2024-11-20T14:47:48,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741835_1011 (size=395) 2024-11-20T14:47:48,363 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0a82dd5f2d9d96669a0208e5304ff21e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48 2024-11-20T14:47:48,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46673 is added to blk_1073741836_1012 (size=78) 2024-11-20T14:47:48,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741836_1012 (size=78) 2024-11-20T14:47:48,370 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:48,370 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 0a82dd5f2d9d96669a0208e5304ff21e, disabling compactions & flushes 2024-11-20T14:47:48,370 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:47:48,370 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:47:48,370 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. after waiting 0 ms 2024-11-20T14:47:48,370 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:47:48,370 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:47:48,370 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0a82dd5f2d9d96669a0208e5304ff21e: Waiting for close lock at 1732114068370Disabling compacts and flushes for region at 1732114068370Disabling writes for close at 1732114068370Writing region close event to WAL at 1732114068370Closed at 1732114068370 2024-11-20T14:47:48,372 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T14:47:48,372 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732114068372"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732114068372"}]},"ts":"1732114068372"} 2024-11-20T14:47:48,374 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-20T14:47:48,375 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T14:47:48,375 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114068375"}]},"ts":"1732114068375"} 2024-11-20T14:47:48,377 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-20T14:47:48,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0a82dd5f2d9d96669a0208e5304ff21e, ASSIGN}] 2024-11-20T14:47:48,379 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0a82dd5f2d9d96669a0208e5304ff21e, ASSIGN 2024-11-20T14:47:48,380 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0a82dd5f2d9d96669a0208e5304ff21e, ASSIGN; state=OFFLINE, location=1a15ecfd95f4,32987,1732114067214; forceNewPlan=false, retain=false 2024-11-20T14:47:48,531 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0a82dd5f2d9d96669a0208e5304ff21e, regionState=OPENING, regionLocation=1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:48,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0a82dd5f2d9d96669a0208e5304ff21e, ASSIGN because future has completed 2024-11-20T14:47:48,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0a82dd5f2d9d96669a0208e5304ff21e, server=1a15ecfd95f4,32987,1732114067214}] 2024-11-20T14:47:48,693 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 
2024-11-20T14:47:48,694 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0a82dd5f2d9d96669a0208e5304ff21e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:47:48,694 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,694 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:47:48,694 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,694 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,696 INFO [StoreOpener-0a82dd5f2d9d96669a0208e5304ff21e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,698 INFO [StoreOpener-0a82dd5f2d9d96669a0208e5304ff21e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a82dd5f2d9d96669a0208e5304ff21e columnFamilyName info 2024-11-20T14:47:48,698 DEBUG [StoreOpener-0a82dd5f2d9d96669a0208e5304ff21e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:47:48,699 INFO [StoreOpener-0a82dd5f2d9d96669a0208e5304ff21e-1 {}] regionserver.HStore(327): Store=0a82dd5f2d9d96669a0208e5304ff21e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:47:48,699 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,701 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,701 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,702 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,702 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,704 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,707 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:47:48,708 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0a82dd5f2d9d96669a0208e5304ff21e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813108, jitterRate=0.03392067551612854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T14:47:48,708 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:47:48,709 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0a82dd5f2d9d96669a0208e5304ff21e: Running coprocessor pre-open hook at 1732114068695Writing region info on filesystem at 1732114068695Initializing all the Stores at 1732114068696 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114068696Cleaning up temporary data from old regions at 1732114068702 (+6 ms)Running coprocessor post-open hooks at 1732114068708 (+6 ms)Region opened successfully at 1732114068709 (+1 ms) 2024-11-20T14:47:48,711 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e., pid=6, masterSystemTime=1732114068687 2024-11-20T14:47:48,713 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:47:48,713 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:47:48,714 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0a82dd5f2d9d96669a0208e5304ff21e, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,32987,1732114067214 2024-11-20T14:47:48,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0a82dd5f2d9d96669a0208e5304ff21e, server=1a15ecfd95f4,32987,1732114067214 because future has completed 2024-11-20T14:47:48,722 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T14:47:48,722 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0a82dd5f2d9d96669a0208e5304ff21e, server=1a15ecfd95f4,32987,1732114067214 in 184 msec 2024-11-20T14:47:48,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T14:47:48,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=0a82dd5f2d9d96669a0208e5304ff21e, ASSIGN in 345 msec 2024-11-20T14:47:48,726 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T14:47:48,726 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114068726"}]},"ts":"1732114068726"} 2024-11-20T14:47:48,728 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-20T14:47:48,729 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T14:47:48,731 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 378 msec 2024-11-20T14:47:49,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:49,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:50,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:50,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:50,598 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T14:47:50,598 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T14:47:50,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T14:47:50,599 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-20T14:47:50,600 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:47:50,600 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T14:47:51,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:51,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:52,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:52,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:53,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:53,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:53,711 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:47:53,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:47:53,746 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T14:47:53,746 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-20T14:47:54,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:54,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:55,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:55,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:56,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:56,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:57,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:57,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:58,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:58,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39003 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:47:58,395 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-20T14:47:58,395 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-20T14:47:58,397 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T14:47:58,397 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:47:58,402 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e., hostname=1a15ecfd95f4,32987,1732114067214, seqNum=2] 2024-11-20T14:47:59,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:47:59,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:00,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:00,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:00,405 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:48:00,406 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:46673,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:00,406 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:00,406 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:00,407 WARN [DataStreamer for file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 block BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46673,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK], DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46673,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]) is bad. 
2024-11-20T14:48:00,407 WARN [DataStreamer for file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta block BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46673,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK], DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46673,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]) is bad. 2024-11-20T14:48:00,407 WARN [DataStreamer for file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 block BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:46673,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46673,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]) is bad. 2024-11-20T14:48:00,407 WARN [PacketResponder: BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46673] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:48:00,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:33608 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33608 dst: /127.0.0.1:37477 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:00,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-490072865_22 at /127.0.0.1:40702 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40702 dst: /127.0.0.1:46673 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:00,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:40756 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40756 dst: /127.0.0.1:46673 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:48:00,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:40740 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40740 dst: /127.0.0.1:46673 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:00,410 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-490072865_22 at /127.0.0.1:33574 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33574 dst: /127.0.0.1:37477 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:00,410 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:33616 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33616 dst: /127.0.0.1:37477 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:48:00,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c3bdc88{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:00,439 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d408466{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:48:00,439 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:48:00,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2572d67e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:48:00,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b065be7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,STOPPED} 2024-11-20T14:48:00,441 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:48:00,441 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:48:00,441 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1527237828-172.17.0.2-1732114065257 (Datanode Uuid 485151a5-b0d9-49d8-9fb8-a65771eb4352) service to localhost/127.0.0.1:44871 2024-11-20T14:48:00,441 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:48:00,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data3/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:00,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data4/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:00,443 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:48:00,457 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:00,463 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:48:00,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:48:00,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:48:00,464 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:48:00,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6079e51{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:48:00,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2164df77{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:48:00,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@299db040{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir/jetty-localhost-40711-hadoop-hdfs-3_4_1-tests_jar-_-any-11298124783851623360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:00,559 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3564c776{HTTP/1.1, (http/1.1)}{localhost:40711} 2024-11-20T14:48:00,559 INFO [Time-limited test {}] server.Server(415): Started @165758ms 2024-11-20T14:48:00,560 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:48:00,579 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:00,579 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:00,579 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:00,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:35218 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35218 dst: /127.0.0.1:37477 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:48:00,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-490072865_22 at /127.0.0.1:35206 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35206 dst: /127.0.0.1:37477 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:00,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:35220 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37477:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35220 dst: /127.0.0.1:37477 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:00,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4da1d10a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:00,584 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39960015{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:48:00,584 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:48:00,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@dbb4bbe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:48:00,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b6170a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,STOPPED} 2024-11-20T14:48:00,585 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:48:00,585 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1527237828-172.17.0.2-1732114065257 (Datanode Uuid b3676381-e9ef-4489-8ec7-e5041b093e52) service to localhost/127.0.0.1:44871 2024-11-20T14:48:00,586 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data1/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread 
Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:00,586 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:48:00,586 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:48:00,586 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data2/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:00,586 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:48:00,593 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:00,596 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:48:00,597 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:48:00,597 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:48:00,597 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:48:00,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58965b82{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:48:00,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42f88f6e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:48:00,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71faeb58{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir/jetty-localhost-39469-hadoop-hdfs-3_4_1-tests_jar-_-any-15039741210733819149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:00,691 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e15dcb6{HTTP/1.1, (http/1.1)}{localhost:39469} 2024-11-20T14:48:00,692 INFO [Time-limited test {}] server.Server(415): Started @165891ms 2024-11-20T14:48:00,693 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:48:01,017 WARN [Thread-1332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:48:01,020 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1accd54b20993bf0 with lease ID 0xac87034e3900051d: from storage DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7 node DatanodeRegistration(127.0.0.1:45815, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=43513, infoSecurePort=0, ipcPort=43555, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:01,020 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1accd54b20993bf0 with lease ID 0xac87034e3900051d: from storage DS-d5db250a-9e67-4f38-a1c0-4042b43d9fc3 node DatanodeRegistration(127.0.0.1:45815, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=43513, infoSecurePort=0, ipcPort=43555, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:01,091 WARN [Thread-1352 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:48:01,093 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30a6f289d2ffb2e0 with lease ID 0xac87034e3900051e: from storage DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f node DatanodeRegistration(127.0.0.1:40177, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=42579, infoSecurePort=0, ipcPort=40457, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:01,093 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30a6f289d2ffb2e0 with lease ID 0xac87034e3900051e: from storage DS-11069ab3-4128-4678-8188-97db61721a1f node DatanodeRegistration(127.0.0.1:40177, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=42579, infoSecurePort=0, ipcPort=40457, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:01,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:01,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:01,708 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-20T14:48:01,713 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-20T14:48:01,715 ERROR [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:01,716 WARN [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:01,716 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C32987%2C1732114067214:(num 1732114067833) roll requested 2024-11-20T14:48:01,716 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:01,723 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 newFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:01,723 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:01,723 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:01,723 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:01,723 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:01,723 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:01,724 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:01,724 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:01,724 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:01,724 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:48:01,725 WARN [IPC Server handler 4 on default port 44871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-20T14:48:01,725 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 after 1ms 2024-11-20T14:48:01,730 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42579:42579),(127.0.0.1/127.0.0.1:43513:43513)] 2024-11-20T14:48:01,731 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 is not closed yet, will try archiving it next time 2024-11-20T14:48:02,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:02,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:03,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:03,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:03,734 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-20T14:48:04,019 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T14:48:04,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:04,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:05,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:05,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:05,726 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 after 4002ms 2024-11-20T14:48:05,737 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:05,738 WARN [DataStreamer for file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 block BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40177,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:45815,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40177,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]) is bad. 2024-11-20T14:48:05,739 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:57462 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45815:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57462 dst: /127.0.0.1:45815 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:05,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:41824 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40177:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41824 dst: /127.0.0.1:40177 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T14:48:05,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71faeb58{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:05,780 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e15dcb6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:48:05,780 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:48:05,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42f88f6e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:48:05,781 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58965b82{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,STOPPED} 2024-11-20T14:48:05,783 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:48:05,783 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:48:05,783 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:48:05,783 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1527237828-172.17.0.2-1732114065257 (Datanode Uuid b3676381-e9ef-4489-8ec7-e5041b093e52) service to localhost/127.0.0.1:44871 2024-11-20T14:48:05,784 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data1/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:05,784 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data2/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:05,785 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:48:05,794 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:05,797 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:48:05,798 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:48:05,798 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:48:05,798 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:48:05,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@589b81ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:48:05,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a2d8801{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:48:05,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30e88901{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir/jetty-localhost-41471-hadoop-hdfs-3_4_1-tests_jar-_-any-15995018202854043620/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:05,894 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50b2108d{HTTP/1.1, (http/1.1)}{localhost:41471} 2024-11-20T14:48:05,894 INFO [Time-limited test {}] server.Server(415): Started @171094ms 2024-11-20T14:48:05,895 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:48:05,916 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:05,917 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_932342195_22 at /127.0.0.1:45926 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45815:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45926 dst: /127.0.0.1:45815 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:05,918 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@299db040{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:05,919 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3564c776{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:48:05,919 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:48:05,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2164df77{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:48:05,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6079e51{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,STOPPED} 2024-11-20T14:48:05,920 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:48:05,920 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:48:05,920 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1527237828-172.17.0.2-1732114065257 (Datanode Uuid 485151a5-b0d9-49d8-9fb8-a65771eb4352) service to localhost/127.0.0.1:44871 2024-11-20T14:48:05,920 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:48:05,921 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data3/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:05,921 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data4/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:05,921 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:48:05,929 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:05,932 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:48:05,933 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:48:05,933 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:48:05,934 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:48:05,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f908e41{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:48:05,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29217bb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:48:06,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d143a5e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/java.io.tmpdir/jetty-localhost-35173-hadoop-hdfs-3_4_1-tests_jar-_-any-16382796690738219605/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:06,026 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1324c29{HTTP/1.1, 
(http/1.1)}{localhost:35173} 2024-11-20T14:48:06,027 INFO [Time-limited test {}] server.Server(415): Started @171226ms 2024-11-20T14:48:06,028 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:48:06,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:06,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:06,315 WARN [Thread-1406 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:48:06,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x223435e1de74104f with lease ID 0xac87034e3900051f: from storage DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f node DatanodeRegistration(127.0.0.1:39031, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=40733, infoSecurePort=0, ipcPort=37445, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:06,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x223435e1de74104f with lease ID 0xac87034e3900051f: from storage DS-11069ab3-4128-4678-8188-97db61721a1f node DatanodeRegistration(127.0.0.1:39031, datanodeUuid=b3676381-e9ef-4489-8ec7-e5041b093e52, infoPort=40733, infoSecurePort=0, ipcPort=37445, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:06,440 WARN [Thread-1426 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:48:06,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6d8d26f7f2a5387 with lease ID 0xac87034e39000520: from storage DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7 node DatanodeRegistration(127.0.0.1:44217, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=43217, infoSecurePort=0, ipcPort=43023, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:06,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6d8d26f7f2a5387 with lease ID 0xac87034e39000520: from storage DS-d5db250a-9e67-4f38-a1c0-4042b43d9fc3 node DatanodeRegistration(127.0.0.1:44217, datanodeUuid=485151a5-b0d9-49d8-9fb8-a65771eb4352, infoPort=43217, infoSecurePort=0, ipcPort=43023, storageInfo=lv=-57;cid=testClusterID;nsid=1132610476;c=1732114065257), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:07,048 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-20T14:48:07,051 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-20T14:48:07,053 ERROR [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45815,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:07,054 WARN [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45815,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:07,054 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C32987%2C1732114067214:(num 1732114081716) roll requested 2024-11-20T14:48:07,054 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 2024-11-20T14:48:07,065 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 newFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 2024-11-20T14:48:07,065 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:07,065 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:07,065 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:07,066 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:07,066 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:07,066 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 2024-11-20T14:48:07,066 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45815,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:07,067 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45815,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:07,067 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:07,067 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40733:40733),(127.0.0.1/127.0.0.1:43217:43217)] 2024-11-20T14:48:07,067 WARN [IPC Server handler 1 on default port 44871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-20T14:48:07,067 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 is not closed yet, will try archiving it next time 2024-11-20T14:48:07,068 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 after 1ms 2024-11-20T14:48:07,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:07,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:08,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:08,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:09,069 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:09,080 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 newFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:09,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:09,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:09,081 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:09,081 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:09,081 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:09,081 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:09,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741838_1019 (size=1264) 2024-11-20T14:48:09,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741838_1019 (size=1264) 2024-11-20T14:48:09,087 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43217:43217),(127.0.0.1/127.0.0.1:40733:40733)] 2024-11-20T14:48:09,087 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 is not closed yet, will try archiving it next time 2024-11-20T14:48:09,087 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 is not closed yet, will try archiving it next time 2024-11-20T14:48:09,087 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:48:09,087 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:48:09,088 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 after 1ms 2024-11-20T14:48:09,088 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:48:09,099 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732114068709/Put/vlen=218/seqid=0] 2024-11-20T14:48:09,099 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732114078403/Put/vlen=1045/seqid=0] 2024-11-20T14:48:09,099 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114067833 2024-11-20T14:48:09,099 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:09,099 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:09,100 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 after 1ms 2024-11-20T14:48:09,100 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:09,104 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732114081715/Put/vlen=1045/seqid=0] 2024-11-20T14:48:09,104 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732114083735/Put/vlen=1045/seqid=0] 2024-11-20T14:48:09,105 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 2024-11-20T14:48:09,105 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 2024-11-20T14:48:09,105 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 2024-11-20T14:48:09,105 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 after 0ms 2024-11-20T14:48:09,105 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114087054 2024-11-20T14:48:09,110 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732114087053/Put/vlen=1045/seqid=0] 2024-11-20T14:48:09,110 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:09,110 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:09,111 WARN [IPC Server handler 1 on default port 44871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-20T14:48:09,111 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 after 1ms 2024-11-20T14:48:09,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:09,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:09,444 WARN [ResponseProcessor for block BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:09,444 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-490072865_22 at /127.0.0.1:33718 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33718 dst: /127.0.0.1:44217 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44217 remote=/127.0.0.1:33718]. Total timeout mills is 60000, 59636 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:09,444 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-490072865_22 at /127.0.0.1:46142 [Receiving block BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39031:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46142 dst: /127.0.0.1:39031 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:09,444 WARN [DataStreamer for file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 block BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44217,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK], DatanodeInfoWithStorage[127.0.0.1:39031,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44217,DS-696904a2-1e7e-43cf-8d3e-aec4b9d96ed7,DISK]) is bad. 2024-11-20T14:48:09,448 WARN [DataStreamer for file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 block BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:09,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741839_1022 (size=85) 2024-11-20T14:48:09,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741839_1022 (size=85) 2024-11-20T14:48:10,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:10,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:10,319 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T14:48:11,069 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114081716 after 4002ms 2024-11-20T14:48:11,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:11,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:12,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:12,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:13,112 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 after 4002ms 2024-11-20T14:48:13,112 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:13,117 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:13,117 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-20T14:48:13,118 ERROR [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:13,118 WARN [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:13,118 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C32987%2C1732114067214.meta:.meta(num 1732114068190) roll requested 2024-11-20T14:48:13,118 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114093118.meta 2024-11-20T14:48:13,124 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,124 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,124 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,124 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,125 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,125 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114093118.meta 2024-11-20T14:48:13,125 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:13,125 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:13,125 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta 2024-11-20T14:48:13,126 WARN [IPC Server handler 2 on default port 44871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-11-20T14:48:13,126 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta after 1ms 2024-11-20T14:48:13,126 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43217:43217),(127.0.0.1/127.0.0.1:40733:40733)] 2024-11-20T14:48:13,126 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta is not closed yet, will try archiving it next time 2024-11-20T14:48:13,142 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/info/ea2bd73c22b04b2ebb3a0fd12f759f4f is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e./info:regioninfo/1732114068714/Put/seqid=0 2024-11-20T14:48:13,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741841_1025 (size=7125) 2024-11-20T14:48:13,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741841_1025 (size=7125) 2024-11-20T14:48:13,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:13,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:13,553 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/info/ea2bd73c22b04b2ebb3a0fd12f759f4f 2024-11-20T14:48:13,576 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/ns/bd9341d063144da6bce2cd54d7292947 is 43, key is default/ns:d/1732114068313/Put/seqid=0 2024-11-20T14:48:13,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741842_1026 (size=5153) 2024-11-20T14:48:13,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741842_1026 (size=5153) 2024-11-20T14:48:13,591 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/ns/bd9341d063144da6bce2cd54d7292947 2024-11-20T14:48:13,612 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/table/18fefb196e5f45799a3061660e18b71c is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732114068726/Put/seqid=0 2024-11-20T14:48:13,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741843_1027 (size=5438) 2024-11-20T14:48:13,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741843_1027 (size=5438) 2024-11-20T14:48:13,618 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/table/18fefb196e5f45799a3061660e18b71c 2024-11-20T14:48:13,624 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/info/ea2bd73c22b04b2ebb3a0fd12f759f4f as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/info/ea2bd73c22b04b2ebb3a0fd12f759f4f 2024-11-20T14:48:13,629 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/info/ea2bd73c22b04b2ebb3a0fd12f759f4f, entries=10, sequenceid=11, filesize=7.0 K 2024-11-20T14:48:13,631 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/ns/bd9341d063144da6bce2cd54d7292947 as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/ns/bd9341d063144da6bce2cd54d7292947 2024-11-20T14:48:13,637 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/ns/bd9341d063144da6bce2cd54d7292947, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T14:48:13,638 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/.tmp/table/18fefb196e5f45799a3061660e18b71c as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/table/18fefb196e5f45799a3061660e18b71c 2024-11-20T14:48:13,644 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/table/18fefb196e5f45799a3061660e18b71c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-20T14:48:13,646 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 528ms, sequenceid=11, compaction requested=false 2024-11-20T14:48:13,646 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T14:48:13,646 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0a82dd5f2d9d96669a0208e5304ff21e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-20T14:48:13,646 ERROR [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:13,647 WARN [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48-prefix:1a15ecfd95f4,32987,1732114067214 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:13,647 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C32987%2C1732114067214:(num 1732114089069) roll requested 2024-11-20T14:48:13,647 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C32987%2C1732114067214.1732114093647 2024-11-20T14:48:13,656 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 newFile=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114093647 2024-11-20T14:48:13,656 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,656 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,656 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,656 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,657 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,657 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114093647 2024-11-20T14:48:13,657 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:13,657 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1527237828-172.17.0.2-1732114065257:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:13,658 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:13,658 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 after 0ms 2024-11-20T14:48:13,663 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 to hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/oldWALs/1a15ecfd95f4%2C32987%2C1732114067214.1732114089069 2024-11-20T14:48:13,663 DEBUG [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40733:40733),(127.0.0.1/127.0.0.1:43217:43217)] 2024-11-20T14:48:13,680 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e/.tmp/info/8eb3ce056ef7419c88119d874cc3b87a is 1080, key is row1002/info:/1732114078403/Put/seqid=0 2024-11-20T14:48:13,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741845_1029 (size=9270) 2024-11-20T14:48:13,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741845_1029 (size=9270) 2024-11-20T14:48:13,689 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e/.tmp/info/8eb3ce056ef7419c88119d874cc3b87a 2024-11-20T14:48:13,696 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e/.tmp/info/8eb3ce056ef7419c88119d874cc3b87a as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e/info/8eb3ce056ef7419c88119d874cc3b87a 2024-11-20T14:48:13,703 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e/info/8eb3ce056ef7419c88119d874cc3b87a, entries=4, sequenceid=8, filesize=9.1 K 2024-11-20T14:48:13,704 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 0a82dd5f2d9d96669a0208e5304ff21e in 58ms, sequenceid=8, compaction requested=false 2024-11-20T14:48:13,704 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
0a82dd5f2d9d96669a0208e5304ff21e: 2024-11-20T14:48:13,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T14:48:13,711 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:48:13,711 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:48:13,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:48:13,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-20T14:48:13,711 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T14:48:13,711 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T14:48:13,711 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1914185100, stopped=false 2024-11-20T14:48:13,711 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a15ecfd95f4,39003,1732114067079 2024-11-20T14:48:13,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:48:13,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:13,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:48:13,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:13,763 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:48:13,763 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T14:48:13,763 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:48:13,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:48:13,763 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:48:13,763 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:48:13,763 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,32987,1732114067214' ***** 2024-11-20T14:48:13,763 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:48:13,763 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:48:13,763 INFO [RS:0;1a15ecfd95f4:32987 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T14:48:13,763 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:48:13,763 INFO [RS:0;1a15ecfd95f4:32987 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(3091): Received CLOSE for 0a82dd5f2d9d96669a0208e5304ff21e 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,32987,1732114067214 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a15ecfd95f4:32987. 2024-11-20T14:48:13,764 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0a82dd5f2d9d96669a0208e5304ff21e, disabling compactions & flushes 2024-11-20T14:48:13,764 DEBUG [RS:0;1a15ecfd95f4:32987 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:48:13,764 DEBUG [RS:0;1a15ecfd95f4:32987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:48:13,764 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:48:13,764 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:48:13,764 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. after waiting 0 ms 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T14:48:13,764 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T14:48:13,764 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T14:48:13,774 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T14:48:13,775 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0a82dd5f2d9d96669a0208e5304ff21e=TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e.} 2024-11-20T14:48:13,775 DEBUG [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1351): Waiting on 0a82dd5f2d9d96669a0208e5304ff21e, 1588230740 2024-11-20T14:48:13,775 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:48:13,775 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:48:13,775 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:48:13,775 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:48:13,775 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:48:13,787 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/default/TestLogRolling-testLogRollOnPipelineRestart/0a82dd5f2d9d96669a0208e5304ff21e/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-20T14:48:13,788 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:48:13,788 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0a82dd5f2d9d96669a0208e5304ff21e: Waiting for close lock at 1732114093764Running coprocessor pre-close hooks at 1732114093764Disabling compacts and flushes for region at 1732114093764Disabling writes for close at 1732114093764Writing region close event to WAL at 1732114093775 (+11 ms)Running coprocessor post-close hooks at 1732114093788 (+13 ms)Closed at 1732114093788 2024-11-20T14:48:13,789 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732114068350.0a82dd5f2d9d96669a0208e5304ff21e. 2024-11-20T14:48:13,807 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T14:48:13,808 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:48:13,808 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:48:13,808 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114093775Running coprocessor pre-close hooks at 1732114093775Disabling compacts and flushes for region at 1732114093775Disabling writes for close at 1732114093775Writing region close event to WAL at 1732114093803 (+28 ms)Running coprocessor post-close hooks at 1732114093807 (+4 ms)Closed at 1732114093808 (+1 ms) 2024-11-20T14:48:13,808 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T14:48:13,975 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,32987,1732114067214; all regions closed. 
2024-11-20T14:48:13,975 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,976 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,976 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,976 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,976 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:13,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741840_1023 (size=825) 2024-11-20T14:48:13,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741840_1023 (size=825) 2024-11-20T14:48:14,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:14,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:14,763 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T14:48:14,763 INFO [regionserver/1a15ecfd95f4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T14:48:15,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:15,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:15,703 INFO [regionserver/1a15ecfd95f4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:48:16,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:16,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:16,445 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T14:48:17,063 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
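The FsDatasetAsyncDiskServiceFixer warning just above is raised when a reflective lookup of a private threadGroup field fails on newer Hadoop releases (HBASE-27595). A self-contained sketch of that failure mode, using stand-in classes rather than the real Hadoop internals the fixer inspects:

```java
import java.lang.reflect.Field;

public class ThreadGroupProbeSketch {
  // Stand-ins for the Hadoop-internal class before and after the field was removed.
  static class OldDiskService { private ThreadGroup threadGroup; }
  static class NewDiskService { /* field removed, as in Hadoop > 3.2.3 / 3.3.4 */ }

  /** Returns the declared field, or null in the logged NoSuchFieldException case. */
  static Field probe(Class<?> clazz) {
    try {
      return clazz.getDeclaredField("threadGroup");
    } catch (NoSuchFieldException e) {
      // The fixer logs the exception and simply skips its workaround.
      return null;
    }
  }

  public static void main(String[] args) {
    System.out.println(probe(OldDiskService.class)); // a Field object
    System.out.println(probe(NewDiskService.class)); // null, the case logged above
  }
}
```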
2024-11-20T14:48:17,127 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta after 4002ms 2024-11-20T14:48:17,127 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/WALs/1a15ecfd95f4,32987,1732114067214/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta to hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/oldWALs/1a15ecfd95f4%2C32987%2C1732114067214.meta.1732114068190.meta 2024-11-20T14:48:17,131 DEBUG [RS:0;1a15ecfd95f4:32987 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/oldWALs 2024-11-20T14:48:17,131 INFO [RS:0;1a15ecfd95f4:32987 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C32987%2C1732114067214.meta:.meta(num 1732114093118) 2024-11-20T14:48:17,131 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,131 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,131 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,131 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,132 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741844_1028 (size=1162) 2024-11-20T14:48:17,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741844_1028 (size=1162) 2024-11-20T14:48:17,139 DEBUG [RS:0;1a15ecfd95f4:32987 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/oldWALs 2024-11-20T14:48:17,139 INFO [RS:0;1a15ecfd95f4:32987 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C32987%2C1732114067214:(num 1732114093647) 2024-11-20T14:48:17,139 DEBUG [RS:0;1a15ecfd95f4:32987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:48:17,139 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:48:17,139 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:48:17,139 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T14:48:17,139 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:48:17,139 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
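The "Recover lease on dfs file ..." and "Recovered lease, attempt=1 ... after 4002ms" messages come from RecoverLeaseFSUtils, which asks the NameNode to release the lease on a WAL file left open by its writer and retries roughly once per second until the file is closed. A minimal sketch of that call, assuming the long-standing recoverFileLease signature and passing no progress reporter; the path and method name wrapping it are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;

public class WalLeaseRecoverySketch {
  // Blocks until the NameNode reports the file closed, retrying as in the
  // "attempt=0 ... attempt=1" log lines above; a null reporter means no
  // cancellation callback is supplied.
  static void recoverWalLease(Configuration conf, Path wal) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    RecoverLeaseFSUtils.recoverFileLease(fs, wal, conf, null);
  }
}
```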
2024-11-20T14:48:17,140 INFO [RS:0;1a15ecfd95f4:32987 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32987 2024-11-20T14:48:17,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,32987,1732114067214 2024-11-20T14:48:17,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:48:17,187 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:48:17,195 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,32987,1732114067214] 2024-11-20T14:48:17,203 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,32987,1732114067214 already deleted, retry=false 2024-11-20T14:48:17,203 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,32987,1732114067214 expired; onlineServers=0 2024-11-20T14:48:17,203 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a15ecfd95f4,39003,1732114067079' ***** 2024-11-20T14:48:17,203 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T14:48:17,203 INFO [M:0;1a15ecfd95f4:39003 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:48:17,203 INFO [M:0;1a15ecfd95f4:39003 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:48:17,203 DEBUG [M:0;1a15ecfd95f4:39003 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T14:48:17,204 DEBUG [M:0;1a15ecfd95f4:39003 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T14:48:17,204 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T14:48:17,204 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114067543 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114067543,5,FailOnTimeoutGroup] 2024-11-20T14:48:17,204 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114067543 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114067543,5,FailOnTimeoutGroup] 2024-11-20T14:48:17,204 INFO [M:0;1a15ecfd95f4:39003 {}] hbase.ChoreService(370): Chore service for: master/1a15ecfd95f4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T14:48:17,204 INFO [M:0;1a15ecfd95f4:39003 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:48:17,204 DEBUG [M:0;1a15ecfd95f4:39003 {}] master.HMaster(1795): Stopping service threads 2024-11-20T14:48:17,204 INFO [M:0;1a15ecfd95f4:39003 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T14:48:17,204 INFO [M:0;1a15ecfd95f4:39003 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:48:17,204 INFO [M:0;1a15ecfd95f4:39003 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T14:48:17,204 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T14:48:17,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T14:48:17,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:17,212 DEBUG [M:0;1a15ecfd95f4:39003 {}] zookeeper.ZKUtil(347): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T14:48:17,212 WARN [M:0;1a15ecfd95f4:39003 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T14:48:17,212 INFO [M:0;1a15ecfd95f4:39003 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/.lastflushedseqids 2024-11-20T14:48:17,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741846_1030 (size=120) 2024-11-20T14:48:17,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741846_1030 (size=120) 2024-11-20T14:48:17,219 INFO [M:0;1a15ecfd95f4:39003 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T14:48:17,219 INFO [M:0;1a15ecfd95f4:39003 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T14:48:17,219 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:48:17,219 INFO [M:0;1a15ecfd95f4:39003 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:48:17,219 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:48:17,219 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:48:17,219 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:48:17,219 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-20T14:48:17,220 ERROR [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData-prefix:1a15ecfd95f4,39003,1732114067079 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:17,220 WARN [FSHLog-0-hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData-prefix:1a15ecfd95f4,39003,1732114067079 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:17,220 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1a15ecfd95f4%2C39003%2C1732114067079:(num 1732114067348) roll requested 2024-11-20T14:48:17,220 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C39003%2C1732114067079.1732114097220 2024-11-20T14:48:17,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:17,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:17,227 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,227 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,227 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,227 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,228 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,228 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114097220 2024-11-20T14:48:17,228 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-20T14:48:17,228 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37477,DS-d2d636cb-84e8-4f92-b9e9-633d19d92e7f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-20T14:48:17,228 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 2024-11-20T14:48:17,229 WARN [IPC Server handler 2 on default port 44871 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-20T14:48:17,229 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40733:40733),(127.0.0.1/127.0.0.1:43217:43217)] 2024-11-20T14:48:17,229 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 is not closed yet, will try archiving it next time 2024-11-20T14:48:17,229 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 after 1ms 2024-11-20T14:48:17,246 DEBUG [M:0;1a15ecfd95f4:39003 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0648d74f4bf1449aa08a4c2d56b8b96c is 82, key is hbase:meta,,1/info:regioninfo/1732114068224/Put/seqid=0 2024-11-20T14:48:17,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741848_1033 (size=5672) 2024-11-20T14:48:17,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741848_1033 (size=5672) 2024-11-20T14:48:17,251 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0648d74f4bf1449aa08a4c2d56b8b96c 2024-11-20T14:48:17,272 DEBUG [M:0;1a15ecfd95f4:39003 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab1bd82fb5524f3daf4ed0046f9eea79 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732114068731/Put/seqid=0 2024-11-20T14:48:17,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741849_1034 (size=6118) 2024-11-20T14:48:17,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741849_1034 (size=6118) 2024-11-20T14:48:17,278 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab1bd82fb5524f3daf4ed0046f9eea79 2024-11-20T14:48:17,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:48:17,295 INFO [RS:0;1a15ecfd95f4:32987 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:48:17,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32987-0x1015a00c13e0001, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:48:17,295 INFO [RS:0;1a15ecfd95f4:32987 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,32987,1732114067214; zookeeper connection closed. 2024-11-20T14:48:17,296 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b7e9b77 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b7e9b77 2024-11-20T14:48:17,296 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T14:48:17,301 DEBUG [M:0;1a15ecfd95f4:39003 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb58b8247c1f45218400b38555827273 is 69, key is 1a15ecfd95f4,32987,1732114067214/rs:state/1732114067681/Put/seqid=0 2024-11-20T14:48:17,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741850_1035 (size=5156) 2024-11-20T14:48:17,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741850_1035 (size=5156) 2024-11-20T14:48:17,306 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb58b8247c1f45218400b38555827273 2024-11-20T14:48:17,327 DEBUG [M:0;1a15ecfd95f4:39003 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8b144c843ffc4e57816f8b18204d196f is 52, key is load_balancer_on/state:d/1732114068346/Put/seqid=0 2024-11-20T14:48:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741851_1036 (size=5056) 2024-11-20T14:48:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741851_1036 (size=5056) 2024-11-20T14:48:17,344 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8b144c843ffc4e57816f8b18204d196f 2024-11-20T14:48:17,352 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0648d74f4bf1449aa08a4c2d56b8b96c as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0648d74f4bf1449aa08a4c2d56b8b96c 2024-11-20T14:48:17,358 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0648d74f4bf1449aa08a4c2d56b8b96c, entries=8, sequenceid=56, filesize=5.5 K 2024-11-20T14:48:17,359 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab1bd82fb5524f3daf4ed0046f9eea79 as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab1bd82fb5524f3daf4ed0046f9eea79 2024-11-20T14:48:17,365 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab1bd82fb5524f3daf4ed0046f9eea79, entries=6, sequenceid=56, filesize=6.0 K 2024-11-20T14:48:17,366 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb58b8247c1f45218400b38555827273 as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bb58b8247c1f45218400b38555827273 2024-11-20T14:48:17,372 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bb58b8247c1f45218400b38555827273, entries=1, sequenceid=56, filesize=5.0 K 2024-11-20T14:48:17,373 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8b144c843ffc4e57816f8b18204d196f as hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8b144c843ffc4e57816f8b18204d196f 2024-11-20T14:48:17,379 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8b144c843ffc4e57816f8b18204d196f, entries=1, sequenceid=56, filesize=4.9 K 2024-11-20T14:48:17,380 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 161ms, sequenceid=56, compaction requested=false 2024-11-20T14:48:17,382 INFO [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
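The "Committing .../.tmp/... as .../info/..." lines above are the final step of the flush: each HFile written under the region's .tmp directory is moved into its column family directory before being added to the store. A minimal sketch of that commit step, assuming plain FileSystem.rename semantics and illustrative paths (the real code in HRegionFileSystem also validates the file before the move):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFileSketch {
  // Moves a flushed HFile out of the region's .tmp directory into the
  // family directory, mirroring the "Committing ... as ..." messages above.
  static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}
```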
2024-11-20T14:48:17,382 DEBUG [M:0;1a15ecfd95f4:39003 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114097219Disabling compacts and flushes for region at 1732114097219Disabling writes for close at 1732114097219Obtaining lock to block concurrent updates at 1732114097219Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732114097219Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732114097220 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732114097230 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732114097230Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732114097245 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732114097245Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732114097256 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732114097272 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732114097272Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732114097284 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732114097301 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732114097301Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732114097311 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732114097326 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732114097326Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2694b1b2: reopening flushed file at 1732114097350 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19f6e0a9: reopening flushed file at 1732114097358 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5091f8fd: reopening flushed file at 1732114097365 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@82e9261: reopening flushed file at 1732114097372 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 161ms, sequenceid=56, compaction requested=false at 1732114097380 (+8 ms)Writing region close event to WAL at 1732114097381 (+1 ms)Closed at 1732114097381 2024-11-20T14:48:17,382 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,382 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,382 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,382 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,382 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:17,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44217 is added to blk_1073741847_1031 (size=757) 2024-11-20T14:48:17,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39031 is added to blk_1073741847_1031 (size=757) 2024-11-20T14:48:18,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:18,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:18,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:18,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:19,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:19,339 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:48:19,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,370 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:19,446 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-20T14:48:20,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:20,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:20,598 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:48:20,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T14:48:20,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T14:48:20,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-20T14:48:21,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:21,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:21,230 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 after 4002ms 2024-11-20T14:48:21,231 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/WALs/1a15ecfd95f4,39003,1732114067079/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 to hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/oldWALs/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 2024-11-20T14:48:21,234 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/MasterData/oldWALs/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348 to hdfs://localhost:44871/user/jenkins/test-data/bfe446f6-73fb-e9ec-840a-6c8d2996cf48/oldWALs/1a15ecfd95f4%2C39003%2C1732114067079.1732114067348$masterlocalwal$ 2024-11-20T14:48:21,235 INFO [M:0;1a15ecfd95f4:39003 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T14:48:21,235 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T14:48:21,235 INFO [M:0;1a15ecfd95f4:39003 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39003 2024-11-20T14:48:21,235 INFO [M:0;1a15ecfd95f4:39003 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:48:21,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:48:21,378 INFO [M:0;1a15ecfd95f4:39003 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:48:21,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39003-0x1015a00c13e0000, quorum=127.0.0.1:55126, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:48:21,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d143a5e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:21,381 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1324c29{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:48:21,381 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:48:21,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29217bb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:48:21,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f908e41{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,STOPPED} 2024-11-20T14:48:21,382 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
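The repeated WARN blocks above are one recurring pattern: roughly once per second the Close-WAL-Writer-0 thread probes whether the old WAL files' leases have been released by calling DistributedFileSystem.isFileClosed via reflection (hence the Method.invoke frames and the InvocationTargetException wrapper), and each probe against the already-closed localhost:44451 client fails with "Filesystem closed", while the live localhost:44871 master WAL eventually logs "Recovered lease, attempt=1 ... after 4002ms". A hedged sketch of that recover-then-poll loop using the public DistributedFileSystem API (the timeout and pause values are assumptions for the example; this is not the actual RecoverLeaseFSUtils implementation):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical helper: wait until the NameNode has closed (recovered the lease of) a WAL file.
public final class LeaseRecoverySketch {
  public static boolean waitForLease(FileSystem fs, Path wal) throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // only HDFS has leases to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + 60_000L; // assumed overall timeout
    while (System.currentTimeMillis() < deadline) {
      try {
        // recoverLease() returns true once the file is closed; isFileClosed() is the
        // cheaper follow-up check between attempts (the call the WARNs above come from).
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // e.g. "Filesystem closed" during shutdown, as in the stack traces above;
        // the caller logs a WARN and keeps retrying until it gives up.
      }
      Thread.sleep(1_000L); // matches the roughly one-second spacing of the retries in this log
    }
    return false;
  }
}
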
2024-11-20T14:48:21,382 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:48:21,382 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:48:21,382 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1527237828-172.17.0.2-1732114065257 (Datanode Uuid 485151a5-b0d9-49d8-9fb8-a65771eb4352) service to localhost/127.0.0.1:44871 2024-11-20T14:48:21,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data3/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:21,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data4/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:21,383 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:48:21,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30e88901{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:21,385 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50b2108d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:48:21,385 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:48:21,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a2d8801{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:48:21,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@589b81ae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,STOPPED} 2024-11-20T14:48:21,386 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:48:21,386 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:48:21,386 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:48:21,386 WARN [BP-1527237828-172.17.0.2-1732114065257 heartbeating to localhost/127.0.0.1:44871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1527237828-172.17.0.2-1732114065257 (Datanode Uuid b3676381-e9ef-4489-8ec7-e5041b093e52) service to localhost/127.0.0.1:44871 2024-11-20T14:48:21,387 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data1/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:21,387 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/cluster_ec498f2a-5d73-af08-045b-301e5c8882cf/data/data2/current/BP-1527237828-172.17.0.2-1732114065257 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:48:21,387 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:48:21,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@13b0bc33{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:48:21,393 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5af08dcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:48:21,393 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:48:21,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2475ffae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:48:21,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c8242e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir/,STOPPED} 2024-11-20T14:48:21,399 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T14:48:21,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T14:48:21,436 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:44871 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:44871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44871 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:44871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:44871 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=258 (was 241) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9579 (was 10135) 2024-11-20T14:48:21,443 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=258, ProcessCount=11, AvailableMemoryMB=9579 2024-11-20T14:48:21,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.log.dir so I do NOT create it in target/test-data/347a1eb6-f706-2df3-2336-66a20d107354 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5ede7288-7586-26d6-f06d-a06565e7c9b3/hadoop.tmp.dir so I do NOT create it in target/test-data/347a1eb6-f706-2df3-2336-66a20d107354 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1, deleteOnExit=true 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/test.cache.data in system properties and HBase conf 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir in system properties and HBase conf 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T14:48:21,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T14:48:21,444 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:48:21,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T14:48:21,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/nfs.dump.dir in system properties and HBase conf 2024-11-20T14:48:21,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/java.io.tmpdir in system properties and HBase conf 2024-11-20T14:48:21,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:48:21,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T14:48:21,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T14:48:21,462 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:48:21,746 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:21,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:48:21,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:48:21,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:48:21,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:48:21,754 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:21,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f0cf80d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:48:21,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48497d3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:48:21,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3962278a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/java.io.tmpdir/jetty-localhost-36249-hadoop-hdfs-3_4_1-tests_jar-_-any-10918125041551488220/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:48:21,867 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c518279{HTTP/1.1, (http/1.1)}{localhost:36249} 2024-11-20T14:48:21,867 INFO [Time-limited test {}] server.Server(415): Started @187066ms 2024-11-20T14:48:21,878 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:48:22,062 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:22,065 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:48:22,075 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:48:22,075 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:48:22,075 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:48:22,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3721e694{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:48:22,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5829df96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:48:22,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76c42846{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/java.io.tmpdir/jetty-localhost-34983-hadoop-hdfs-3_4_1-tests_jar-_-any-14264321313991549154/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:22,181 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6bc13808{HTTP/1.1, (http/1.1)}{localhost:34983} 2024-11-20T14:48:22,181 INFO [Time-limited test {}] server.Server(415): Started @187380ms 2024-11-20T14:48:22,182 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:48:22,208 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:48:22,211 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:48:22,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:48:22,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:48:22,212 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:48:22,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1188926{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:48:22,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@218a8176{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:48:22,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:22,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:22,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35649fe3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/java.io.tmpdir/jetty-localhost-34101-hadoop-hdfs-3_4_1-tests_jar-_-any-7721479915065352780/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:48:22,321 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@453c7069{HTTP/1.1, (http/1.1)}{localhost:34101} 2024-11-20T14:48:22,321 INFO [Time-limited test {}] server.Server(415): Started @187520ms 2024-11-20T14:48:22,322 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-20T14:48:22,968 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data2/current/BP-1701722610-172.17.0.2-1732114101473/current, will proceed with Du for space computation calculation, 2024-11-20T14:48:22,968 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data1/current/BP-1701722610-172.17.0.2-1732114101473/current, will proceed with Du for space computation calculation, 2024-11-20T14:48:22,991 WARN [Thread-1610 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:48:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66c61a3f4f5e842f with lease ID 0xbd45d025e9f89966: Processing first storage report for DS-b7249370-2043-48d5-88ab-4eb9e6a0fe0a from datanode DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c11b77a8-d22f-4dbe-8f83-4932c6eb8c12, infoPort=46327, infoSecurePort=0, ipcPort=33105, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473) 2024-11-20T14:48:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66c61a3f4f5e842f with lease ID 0xbd45d025e9f89966: from storage DS-b7249370-2043-48d5-88ab-4eb9e6a0fe0a node DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c11b77a8-d22f-4dbe-8f83-4932c6eb8c12, infoPort=46327, infoSecurePort=0, ipcPort=33105, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66c61a3f4f5e842f with lease ID 0xbd45d025e9f89966: Processing first storage report for DS-8862eca3-97bf-43d7-b876-c1980398a1ff from datanode DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c11b77a8-d22f-4dbe-8f83-4932c6eb8c12, infoPort=46327, infoSecurePort=0, ipcPort=33105, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473) 2024-11-20T14:48:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66c61a3f4f5e842f with lease ID 0xbd45d025e9f89966: from storage DS-8862eca3-97bf-43d7-b876-c1980398a1ff node DatanodeRegistration(127.0.0.1:40207, datanodeUuid=c11b77a8-d22f-4dbe-8f83-4932c6eb8c12, infoPort=46327, infoSecurePort=0, ipcPort=33105, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:23,068 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data3/current/BP-1701722610-172.17.0.2-1732114101473/current, will proceed with Du for space computation calculation, 2024-11-20T14:48:23,068 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data4/current/BP-1701722610-172.17.0.2-1732114101473/current, will proceed with Du for space computation calculation, 2024-11-20T14:48:23,088 WARN [Thread-1633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:48:23,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdbe39f2a81507cf7 with lease ID 0xbd45d025e9f89967: Processing first storage report for DS-a836eac5-8f9e-44ef-8c4e-e05e52a1ea72 from datanode DatanodeRegistration(127.0.0.1:44245, datanodeUuid=479c7090-a5cf-42bf-9dc2-c6767d9621bf, infoPort=37069, infoSecurePort=0, ipcPort=35305, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473) 2024-11-20T14:48:23,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdbe39f2a81507cf7 with lease ID 0xbd45d025e9f89967: from storage DS-a836eac5-8f9e-44ef-8c4e-e05e52a1ea72 node DatanodeRegistration(127.0.0.1:44245, datanodeUuid=479c7090-a5cf-42bf-9dc2-c6767d9621bf, infoPort=37069, infoSecurePort=0, ipcPort=35305, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:23,090 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdbe39f2a81507cf7 with lease ID 0xbd45d025e9f89967: Processing first storage report for DS-c9c1f292-eeef-441f-916f-a053265be868 from datanode DatanodeRegistration(127.0.0.1:44245, datanodeUuid=479c7090-a5cf-42bf-9dc2-c6767d9621bf, infoPort=37069, infoSecurePort=0, ipcPort=35305, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473) 2024-11-20T14:48:23,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdbe39f2a81507cf7 with lease ID 0xbd45d025e9f89967: from storage DS-c9c1f292-eeef-441f-916f-a053265be868 node DatanodeRegistration(127.0.0.1:44245, datanodeUuid=479c7090-a5cf-42bf-9dc2-c6767d9621bf, infoPort=37069, infoSecurePort=0, ipcPort=35305, storageInfo=lv=-57;cid=testClusterID;nsid=1254947083;c=1732114101473), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:48:23,152 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354 2024-11-20T14:48:23,154 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/zookeeper_0, clientPort=61696, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T14:48:23,155 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61696 2024-11-20T14:48:23,156 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:23,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:23,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:48:23,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:48:23,174 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7 with version=8 2024-11-20T14:48:23,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase-staging 2024-11-20T14:48:23,176 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:48:23,176 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:48:23,176 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:48:23,176 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:48:23,176 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:48:23,176 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:48:23,176 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T14:48:23,176 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:48:23,177 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35577 2024-11-20T14:48:23,179 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35577 connecting to ZooKeeper ensemble=127.0.0.1:61696 2024-11-20T14:48:23,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:355770x0, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-20T14:48:23,225 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35577-0x1015a014e370000 connected 2024-11-20T14:48:23,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:23,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:23,286 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:23,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:23,291 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:48:23,292 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7, hbase.cluster.distributed=false 2024-11-20T14:48:23,294 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:48:23,294 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35577 2024-11-20T14:48:23,294 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35577 2024-11-20T14:48:23,295 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35577 2024-11-20T14:48:23,295 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35577 2024-11-20T14:48:23,295 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35577 2024-11-20T14:48:23,314 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:48:23,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:48:23,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:48:23,315 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:48:23,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:48:23,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:48:23,315 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:48:23,315 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:48:23,316 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35523 2024-11-20T14:48:23,318 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35523 connecting to ZooKeeper ensemble=127.0.0.1:61696 2024-11-20T14:48:23,319 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:23,320 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:23,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355230x0, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:48:23,328 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:355230x0, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:48:23,328 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35523-0x1015a014e370001 connected 2024-11-20T14:48:23,328 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:48:23,329 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:48:23,329 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T14:48:23,330 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:48:23,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35523 2024-11-20T14:48:23,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35523 2024-11-20T14:48:23,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35523 2024-11-20T14:48:23,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35523 2024-11-20T14:48:23,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35523 2024-11-20T14:48:23,344 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a15ecfd95f4:35577 2024-11-20T14:48:23,344 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:23,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:48:23,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:48:23,353 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:23,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T14:48:23,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,361 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:48:23,363 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a15ecfd95f4,35577,1732114103176 from backup master directory 2024-11-20T14:48:23,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-20T14:48:23,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:23,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:48:23,369 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:48:23,369 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:23,374 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/hbase.id] with ID: d3cd7399-f51c-4b17-aaa5-643a5e94d5e3 2024-11-20T14:48:23,375 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/.tmp/hbase.id 2024-11-20T14:48:23,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:48:23,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:48:23,386 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/.tmp/hbase.id]:[hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/hbase.id] 2024-11-20T14:48:23,398 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:23,398 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T14:48:23,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-20T14:48:23,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:48:23,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:48:23,418 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:48:23,419 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T14:48:23,419 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:48:23,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:48:23,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:48:23,427 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store 2024-11-20T14:48:23,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:48:23,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:48:23,435 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:48:23,435 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:48:23,435 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:48:23,435 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:48:23,435 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:48:23,435 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:48:23,435 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T14:48:23,435 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114103435Disabling compacts and flushes for region at 1732114103435Disabling writes for close at 1732114103435Writing region close event to WAL at 1732114103435Closed at 1732114103435 2024-11-20T14:48:23,436 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/.initializing 2024-11-20T14:48:23,436 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/WALs/1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:23,439 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C35577%2C1732114103176, suffix=, logDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/WALs/1a15ecfd95f4,35577,1732114103176, archiveDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/oldWALs, maxLogs=10 2024-11-20T14:48:23,439 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C35577%2C1732114103176.1732114103439 2024-11-20T14:48:23,444 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/WALs/1a15ecfd95f4,35577,1732114103176/1a15ecfd95f4%2C35577%2C1732114103176.1732114103439 2024-11-20T14:48:23,445 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46327:46327),(127.0.0.1/127.0.0.1:37069:37069)] 2024-11-20T14:48:23,445 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:48:23,445 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:48:23,446 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,446 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,448 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T14:48:23,448 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:23,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,450 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T14:48:23,450 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:48:23,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,452 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T14:48:23,452 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:48:23,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T14:48:23,454 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:48:23,455 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,456 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,456 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,458 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,458 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,459 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T14:48:23,460 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:48:23,462 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:48:23,463 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843915, jitterRate=0.07309453189373016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T14:48:23,463 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732114103446Initializing all the Stores at 1732114103446Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114103446Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114103447 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114103447Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114103447Cleaning up temporary data from old regions at 1732114103458 (+11 ms)Region opened successfully at 1732114103463 (+5 ms) 2024-11-20T14:48:23,463 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T14:48:23,466 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c939020, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:48:23,467 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T14:48:23,467 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T14:48:23,467 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T14:48:23,467 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T14:48:23,468 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T14:48:23,468 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T14:48:23,468 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T14:48:23,470 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T14:48:23,471 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T14:48:23,486 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T14:48:23,486 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T14:48:23,487 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T14:48:23,494 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T14:48:23,494 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T14:48:23,495 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T14:48:23,502 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T14:48:23,503 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T14:48:23,511 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T14:48:23,513 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T14:48:23,519 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T14:48:23,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:48:23,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:48:23,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,528 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a15ecfd95f4,35577,1732114103176, sessionid=0x1015a014e370000, setting cluster-up flag (Was=false) 2024-11-20T14:48:23,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,594 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T14:48:23,595 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:23,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:23,636 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T14:48:23,637 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:23,638 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T14:48:23,640 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T14:48:23,640 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T14:48:23,640 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T14:48:23,641 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a15ecfd95f4,35577,1732114103176 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T14:48:23,642 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:48:23,642 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:48:23,642 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:48:23,642 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:48:23,642 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a15ecfd95f4:0, corePoolSize=10, maxPoolSize=10 2024-11-20T14:48:23,643 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,643 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:48:23,643 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732114133644 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T14:48:23,644 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,645 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:48:23,645 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T14:48:23,645 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T14:48:23,645 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T14:48:23,645 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T14:48:23,645 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T14:48:23,645 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T14:48:23,646 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114103645,5,FailOnTimeoutGroup] 2024-11-20T14:48:23,646 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114103646,5,FailOnTimeoutGroup] 2024-11-20T14:48:23,646 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,646 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
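
The cleaner chores initialized above (TimeToLiveLogCleaner, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner and the TTL-based HFile/WAL cleaners) are normally assembled from configuration rather than hard-coded. The sketch below shows one way such a chain could be expressed programmatically; the property names hbase.master.logcleaner.plugins, hbase.master.hfilecleaner.plugins and hbase.master.cleaner.interval are the commonly documented keys and are an assumption here, not values read back from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerChoreConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Chore wake-up period in milliseconds; 600000 matches the period reported above
    // for the LogsCleaner and HFileCleaner chores (key name assumed).
    conf.setInt("hbase.master.cleaner.interval", 600000);
    // Delegate chains; the classes listed are the ones the log reports initializing.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
            + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner");
    System.out.println("log cleaners: " + conf.get("hbase.master.logcleaner.plugins"));
  }
}
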
2024-11-20T14:48:23,646 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T14:48:23,646 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,646 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,646 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T14:48:23,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:48:23,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:48:23,655 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T14:48:23,655 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7 2024-11-20T14:48:23,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:48:23,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:48:23,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:48:23,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:48:23,664 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:48:23,664 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:23,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-20T14:48:23,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:48:23,666 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:23,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:48:23,668 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:48:23,668 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:23,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:48:23,670 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:48:23,670 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:23,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:23,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:48:23,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740 2024-11-20T14:48:23,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740 2024-11-20T14:48:23,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:48:23,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:48:23,674 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
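
The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor, so a fallback of region.getMemStoreFlushHeapSize divided by the number of families (16.0 M here) is used instead. As a hedged illustration only, a per-table lower bound could be attached to a hypothetical table descriptor roughly as follows; the table and family names are placeholders.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
  public static void main(String[] args) {
    // 16 MiB lower bound, matching the 16.0 M fallback reported above for hbase:meta.
    long lowerBound = 16L * 1024 * 1024;
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))           // placeholder table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // placeholder family
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            Long.toString(lowerBound))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}
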
2024-11-20T14:48:23,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:48:23,678 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:48:23,678 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841192, jitterRate=0.06963150203227997}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:48:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732114103661Initializing all the Stores at 1732114103662 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114103662Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114103663 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114103663Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114103663Cleaning up temporary data from old regions at 1732114103674 (+11 ms)Region opened successfully at 1732114103679 (+5 ms) 2024-11-20T14:48:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:48:23,679 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:48:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:48:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:48:23,679 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:48:23,680 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:48:23,680 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114103679Disabling compacts and flushes for region at 1732114103679Disabling writes for close at 1732114103679Writing 
region close event to WAL at 1732114103680 (+1 ms)Closed at 1732114103680 2024-11-20T14:48:23,681 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:48:23,681 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T14:48:23,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T14:48:23,683 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:48:23,685 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T14:48:23,739 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(746): ClusterId : d3cd7399-f51c-4b17-aaa5-643a5e94d5e3 2024-11-20T14:48:23,739 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:48:23,753 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:48:23,753 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:48:23,761 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:48:23,762 DEBUG [RS:0;1a15ecfd95f4:35523 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d06a5b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:48:23,775 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a15ecfd95f4:35523 2024-11-20T14:48:23,775 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:48:23,775 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:48:23,775 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T14:48:23,775 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,35577,1732114103176 with port=35523, startcode=1732114103314 2024-11-20T14:48:23,776 DEBUG [RS:0;1a15ecfd95f4:35523 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:48:23,778 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33559, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:48:23,778 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35577 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:23,778 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35577 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:23,780 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7 2024-11-20T14:48:23,780 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34069 2024-11-20T14:48:23,780 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:48:23,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:48:23,786 DEBUG [RS:0;1a15ecfd95f4:35523 {}] zookeeper.ZKUtil(111): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:23,786 WARN [RS:0;1a15ecfd95f4:35523 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:48:23,786 INFO [RS:0;1a15ecfd95f4:35523 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:48:23,786 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:23,787 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,35523,1732114103314] 2024-11-20T14:48:23,789 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:48:23,791 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:48:23,791 INFO [RS:0;1a15ecfd95f4:35523 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:48:23,792 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
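
The records above show the region server publishing itself under the /hbase/rs znode on the quorum at 127.0.0.1:61696 (for example /hbase/rs/1a15ecfd95f4,35523,1732114103314). A minimal, read-only sketch of inspecting those ephemeral registrations with the plain ZooKeeper client follows; the session timeout and the act of listing the znode are illustrative assumptions and not part of the test itself.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodesSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode taken from the log above; session timeout is an assumption.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61696", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // One-shot read; no watch handling needed.
      }
    });
    try {
      // Each child is an ephemeral node named host,port,startcode,
      // e.g. 1a15ecfd95f4,35523,1732114103314 as registered above.
      List<String> servers = zk.getChildren("/hbase/rs", false);
      for (String server : servers) {
        System.out.println(server);
      }
    } finally {
      zk.close();
    }
  }
}
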
2024-11-20T14:48:23,792 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:48:23,793 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:48:23,793 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,793 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:48:23,794 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:48:23,794 DEBUG [RS:0;1a15ecfd95f4:35523 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:48:23,795 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T14:48:23,795 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,795 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,795 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,795 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,795 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35523,1732114103314-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:48:23,812 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:48:23,813 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35523,1732114103314-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,813 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,813 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.Replication(171): 1a15ecfd95f4,35523,1732114103314 started 2024-11-20T14:48:23,828 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:23,829 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,35523,1732114103314, RpcServer on 1a15ecfd95f4/172.17.0.2:35523, sessionid=0x1015a014e370001 2024-11-20T14:48:23,829 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:48:23,829 DEBUG [RS:0;1a15ecfd95f4:35523 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:23,829 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,35523,1732114103314' 2024-11-20T14:48:23,829 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:48:23,829 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:48:23,830 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:48:23,830 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:48:23,830 DEBUG [RS:0;1a15ecfd95f4:35523 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:23,830 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,35523,1732114103314' 2024-11-20T14:48:23,830 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:48:23,830 DEBUG 
[RS:0;1a15ecfd95f4:35523 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:48:23,830 DEBUG [RS:0;1a15ecfd95f4:35523 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:48:23,831 INFO [RS:0;1a15ecfd95f4:35523 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:48:23,831 INFO [RS:0;1a15ecfd95f4:35523 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-20T14:48:23,835 WARN [1a15ecfd95f4:35577 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-20T14:48:23,932 INFO [RS:0;1a15ecfd95f4:35523 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C35523%2C1732114103314, suffix=, logDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314, archiveDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/oldWALs, maxLogs=32 2024-11-20T14:48:23,933 INFO [RS:0;1a15ecfd95f4:35523 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C35523%2C1732114103314.1732114103933 2024-11-20T14:48:23,938 INFO [RS:0;1a15ecfd95f4:35523 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114103933 2024-11-20T14:48:23,939 DEBUG [RS:0;1a15ecfd95f4:35523 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46327:46327),(127.0.0.1/127.0.0.1:37069:37069)] 2024-11-20T14:48:24,085 DEBUG [1a15ecfd95f4:35577 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T14:48:24,086 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:24,087 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,35523,1732114103314, state=OPENING 2024-11-20T14:48:24,117 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T14:48:24,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:24,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:48:24,128 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:48:24,128 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:48:24,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=1a15ecfd95f4,35523,1732114103314}] 2024-11-20T14:48:24,128 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:48:24,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:24,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:24,282 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T14:48:24,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54657, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T14:48:24,291 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T14:48:24,291 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:48:24,293 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C35523%2C1732114103314.meta, suffix=.meta, logDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314, archiveDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/oldWALs, maxLogs=32 2024-11-20T14:48:24,294 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C35523%2C1732114103314.meta.1732114104294.meta 2024-11-20T14:48:24,300 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.meta.1732114104294.meta 2024-11-20T14:48:24,310 DEBUG 
[RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37069:37069),(127.0.0.1/127.0.0.1:46327:46327)] 2024-11-20T14:48:24,311 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:48:24,311 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T14:48:24,311 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T14:48:24,311 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T14:48:24,312 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T14:48:24,312 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:48:24,312 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T14:48:24,312 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T14:48:24,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:48:24,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:48:24,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:24,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:24,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:48:24,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:48:24,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:24,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:24,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:48:24,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:48:24,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:24,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:24,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:48:24,321 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:48:24,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:24,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:48:24,322 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:48:24,322 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740 2024-11-20T14:48:24,324 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740 2024-11-20T14:48:24,325 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:48:24,325 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:48:24,325 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T14:48:24,327 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:48:24,327 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779026, jitterRate=-0.009417474269866943}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:48:24,328 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T14:48:24,328 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732114104312Writing region info on filesystem at 1732114104312Initializing all the Stores at 1732114104313 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114104313Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114104315 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114104315Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114104315Cleaning up temporary data from old regions at 1732114104325 (+10 ms)Running coprocessor post-open hooks at 1732114104328 (+3 ms)Region opened successfully at 1732114104328 2024-11-20T14:48:24,329 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732114104282 2024-11-20T14:48:24,332 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T14:48:24,332 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T14:48:24,333 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:24,334 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,35523,1732114103314, state=OPEN 2024-11-20T14:48:24,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:48:24,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:48:24,374 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:24,374 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:48:24,374 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:48:24,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T14:48:24,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,35523,1732114103314 in 246 msec 2024-11-20T14:48:24,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T14:48:24,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 696 msec 2024-11-20T14:48:24,381 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:48:24,381 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T14:48:24,383 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:48:24,383 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,35523,1732114103314, seqNum=-1] 2024-11-20T14:48:24,383 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:48:24,385 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34781, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:48:24,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 751 msec 2024-11-20T14:48:24,391 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732114104391, completionTime=-1 2024-11-20T14:48:24,391 INFO 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T14:48:24,392 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732114164394 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732114224394 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35577,1732114103176-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35577,1732114103176-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35577,1732114103176-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a15ecfd95f4:35577, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:24,394 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:24,395 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:24,397 DEBUG [master/1a15ecfd95f4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.029sec 2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35577,1732114103176-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:48:24,399 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35577,1732114103176-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T14:48:24,401 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T14:48:24,401 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T14:48:24,401 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35577,1732114103176-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:48:24,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d852c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:48:24,439 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a15ecfd95f4,35577,-1 for getting cluster id 2024-11-20T14:48:24,439 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T14:48:24,441 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3cd7399-f51c-4b17-aaa5-643a5e94d5e3' 2024-11-20T14:48:24,442 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T14:48:24,442 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3cd7399-f51c-4b17-aaa5-643a5e94d5e3" 2024-11-20T14:48:24,442 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70d7f554, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:48:24,442 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a15ecfd95f4,35577,-1] 2024-11-20T14:48:24,443 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T14:48:24,443 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:48:24,444 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T14:48:24,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@226126fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:48:24,446 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:48:24,447 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,35523,1732114103314, seqNum=-1] 2024-11-20T14:48:24,447 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:48:24,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:48:24,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:24,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:48:24,458 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T14:48:24,458 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T14:48:24,459 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a15ecfd95f4,35577,1732114103176 2024-11-20T14:48:24,460 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@56089d27 2024-11-20T14:48:24,460 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T14:48:24,461 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T14:48:24,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T14:48:24,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-20T14:48:24,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:48:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:24,465 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T14:48:24,465 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:24,465 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-20T14:48:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:48:24,466 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T14:48:24,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741835_1011 (size=405) 2024-11-20T14:48:24,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741835_1011 (size=405) 2024-11-20T14:48:24,483 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fce2782eceb464eedd1c21e095cb6144, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7 2024-11-20T14:48:24,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741836_1012 (size=88) 2024-11-20T14:48:24,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44245 is added to blk_1073741836_1012 (size=88) 2024-11-20T14:48:24,490 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:48:24,490 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing fce2782eceb464eedd1c21e095cb6144, disabling compactions & flushes 2024-11-20T14:48:24,490 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:24,490 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:24,490 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. after waiting 0 ms 2024-11-20T14:48:24,490 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:24,490 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:24,490 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for fce2782eceb464eedd1c21e095cb6144: Waiting for close lock at 1732114104490Disabling compacts and flushes for region at 1732114104490Disabling writes for close at 1732114104490Writing region close event to WAL at 1732114104490Closed at 1732114104490 2024-11-20T14:48:24,492 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T14:48:24,492 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732114104492"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732114104492"}]},"ts":"1732114104492"} 2024-11-20T14:48:24,495 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-20T14:48:24,496 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T14:48:24,496 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114104496"}]},"ts":"1732114104496"} 2024-11-20T14:48:24,498 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-20T14:48:24,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=fce2782eceb464eedd1c21e095cb6144, ASSIGN}] 2024-11-20T14:48:24,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=fce2782eceb464eedd1c21e095cb6144, ASSIGN 2024-11-20T14:48:24,501 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=fce2782eceb464eedd1c21e095cb6144, ASSIGN; state=OFFLINE, location=1a15ecfd95f4,35523,1732114103314; forceNewPlan=false, retain=false 2024-11-20T14:48:24,652 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fce2782eceb464eedd1c21e095cb6144, regionState=OPENING, regionLocation=1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:24,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=fce2782eceb464eedd1c21e095cb6144, ASSIGN because future has completed 2024-11-20T14:48:24,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fce2782eceb464eedd1c21e095cb6144, server=1a15ecfd95f4,35523,1732114103314}] 2024-11-20T14:48:24,814 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
2024-11-20T14:48:24,814 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => fce2782eceb464eedd1c21e095cb6144, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:48:24,815 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,815 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:48:24,815 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,815 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,817 INFO [StoreOpener-fce2782eceb464eedd1c21e095cb6144-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,819 INFO [StoreOpener-fce2782eceb464eedd1c21e095cb6144-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fce2782eceb464eedd1c21e095cb6144 columnFamilyName info 2024-11-20T14:48:24,819 DEBUG [StoreOpener-fce2782eceb464eedd1c21e095cb6144-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:48:24,820 INFO [StoreOpener-fce2782eceb464eedd1c21e095cb6144-1 {}] regionserver.HStore(327): Store=fce2782eceb464eedd1c21e095cb6144/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:48:24,820 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,821 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,821 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,822 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,822 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,824 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,827 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:48:24,828 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened fce2782eceb464eedd1c21e095cb6144; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871644, jitterRate=0.10835275053977966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T14:48:24,828 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:48:24,829 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for fce2782eceb464eedd1c21e095cb6144: Running coprocessor pre-open hook at 1732114104815Writing region info on filesystem at 1732114104815Initializing all the Stores at 1732114104816 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114104817 (+1 ms)Cleaning up temporary data from old regions at 1732114104822 (+5 ms)Running coprocessor post-open hooks at 1732114104828 (+6 ms)Region opened successfully at 1732114104829 (+1 ms) 2024-11-20T14:48:24,830 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144., pid=6, masterSystemTime=1732114104809 2024-11-20T14:48:24,833 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post 
open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:24,833 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:24,835 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fce2782eceb464eedd1c21e095cb6144, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,35523,1732114103314 2024-11-20T14:48:24,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fce2782eceb464eedd1c21e095cb6144, server=1a15ecfd95f4,35523,1732114103314 because future has completed 2024-11-20T14:48:24,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T14:48:24,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure fce2782eceb464eedd1c21e095cb6144, server=1a15ecfd95f4,35523,1732114103314 in 184 msec 2024-11-20T14:48:24,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T14:48:24,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=fce2782eceb464eedd1c21e095cb6144, ASSIGN in 344 msec 2024-11-20T14:48:24,847 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T14:48:24,848 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114104847"}]},"ts":"1732114104847"} 2024-11-20T14:48:24,850 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-20T14:48:24,851 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T14:48:24,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 389 msec 2024-11-20T14:48:25,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:25,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:26,101 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:48:26,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,123 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:48:26,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:26,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:27,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:27,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:28,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:28,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:29,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:29,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:29,790 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T14:48:29,790 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-20T14:48:30,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:30,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:30,598 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T14:48:30,598 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T14:48:30,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:48:30,599 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T14:48:30,600 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T14:48:30,600 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T14:48:30,600 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:30,600 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T14:48:31,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:31,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:32,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:32,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:33,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:33,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:34,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:34,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:34,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:48:34,525 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T14:48:34,525 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-20T14:48:34,528 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:34,528 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:34,532 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144., hostname=1a15ecfd95f4,35523,1732114103314, seqNum=2] 2024-11-20T14:48:34,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:34,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:34,547 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-20T14:48:34,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-20T14:48:34,559 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T14:48:34,561 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T14:48:34,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-20T14:48:34,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
2024-11-20T14:48:34,726 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing fce2782eceb464eedd1c21e095cb6144 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T14:48:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/6e435ac4c0284a36955c61d126883a39 is 1080, key is row0001/info:/1732114114533/Put/seqid=0 2024-11-20T14:48:34,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741837_1013 (size=6033) 2024-11-20T14:48:34,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741837_1013 (size=6033) 2024-11-20T14:48:34,749 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/6e435ac4c0284a36955c61d126883a39 2024-11-20T14:48:34,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/6e435ac4c0284a36955c61d126883a39 as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/6e435ac4c0284a36955c61d126883a39 2024-11-20T14:48:34,769 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/6e435ac4c0284a36955c61d126883a39, entries=1, sequenceid=5, filesize=5.9 K 2024-11-20T14:48:34,770 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for fce2782eceb464eedd1c21e095cb6144 in 44ms, sequenceid=5, compaction requested=false 2024-11-20T14:48:34,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for fce2782eceb464eedd1c21e095cb6144: 2024-11-20T14:48:34,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
2024-11-20T14:48:34,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-20T14:48:34,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-20T14:48:34,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-20T14:48:34,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 214 msec 2024-11-20T14:48:34,783 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 239 msec 2024-11-20T14:48:35,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:35,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-20T14:48:44,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-20T14:48:44,614 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T14:48:44,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:44,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-20T14:48:44,619 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-20T14:48:44,620 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T14:48:44,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T14:48:44,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-20T14:48:44,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.
2024-11-20T14:48:44,775 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing fce2782eceb464eedd1c21e095cb6144 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T14:48:44,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/dedec5fe34c44d1588727c67ed2536de is 1080, key is row0002/info:/1732114124615/Put/seqid=0 2024-11-20T14:48:44,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741838_1014 (size=6033) 2024-11-20T14:48:44,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741838_1014 (size=6033) 2024-11-20T14:48:44,810 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/dedec5fe34c44d1588727c67ed2536de 2024-11-20T14:48:44,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/dedec5fe34c44d1588727c67ed2536de as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/dedec5fe34c44d1588727c67ed2536de 2024-11-20T14:48:44,824 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/dedec5fe34c44d1588727c67ed2536de, entries=1, sequenceid=9, filesize=5.9 K 2024-11-20T14:48:44,825 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for fce2782eceb464eedd1c21e095cb6144 in 51ms, sequenceid=9, compaction requested=false 2024-11-20T14:48:44,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for fce2782eceb464eedd1c21e095cb6144: 2024-11-20T14:48:44,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
2024-11-20T14:48:44,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-20T14:48:44,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-20T14:48:44,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T14:48:44,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-11-20T14:48:44,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 213 msec
2024-11-20T14:48:46,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-20T14:48:46,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 after 68059ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:46,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta after 68051ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T14:48:47,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:47,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:48,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:48,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:49,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:49,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:50,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:50,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:51,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:51,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:48:52,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:52,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:53,152 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T14:48:53,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:53,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:54,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:54,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:54,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-20T14:48:54,654 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T14:48:54,656 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C35523%2C1732114103314.1732114134656 2024-11-20T14:48:54,662 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:54,662 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:54,662 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:54,662 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:54,662 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:48:54,663 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114103933 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114134656 2024-11-20T14:48:54,664 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37069:37069),(127.0.0.1/127.0.0.1:46327:46327)] 2024-11-20T14:48:54,664 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114103933 is not closed yet, will try archiving it next time 2024-11-20T14:48:54,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741833_1009 (size=5546) 2024-11-20T14:48:54,665 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:54,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741833_1009 (size=5546) 2024-11-20T14:48:54,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:48:54,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-20T14:48:54,667 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-20T14:48:54,669 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T14:48:54,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T14:48:54,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-20T14:48:54,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:54,823 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing fce2782eceb464eedd1c21e095cb6144 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T14:48:54,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/5be4a1317a674917b7c770fc9dc9e17b is 1080, key is row0003/info:/1732114134655/Put/seqid=0 2024-11-20T14:48:54,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741840_1016 (size=6033) 2024-11-20T14:48:54,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741840_1016 (size=6033) 2024-11-20T14:48:54,834 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/5be4a1317a674917b7c770fc9dc9e17b 2024-11-20T14:48:54,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/5be4a1317a674917b7c770fc9dc9e17b as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/5be4a1317a674917b7c770fc9dc9e17b 2024-11-20T14:48:54,845 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/5be4a1317a674917b7c770fc9dc9e17b, entries=1, sequenceid=13, filesize=5.9 K 2024-11-20T14:48:54,846 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for fce2782eceb464eedd1c21e095cb6144 in 23ms, sequenceid=13, compaction requested=true 2024-11-20T14:48:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for fce2782eceb464eedd1c21e095cb6144: 2024-11-20T14:48:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:48:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-20T14:48:54,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-20T14:48:54,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-20T14:48:54,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-11-20T14:48:54,853 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-11-20T14:48:55,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:55,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:56,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:56,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:57,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:57,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:58,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:58,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:59,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:48:59,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:00,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:00,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:01,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:01,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:02,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:02,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:03,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:03,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:04,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:04,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:04,424 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T14:49:04,424 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
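The repeated WARN entries above all follow one pattern: the Close-WAL-Writer-0 thread is trying to recover the lease on two stale WAL files under hdfs://localhost:44451, and RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively about once per second; every probe fails with `java.io.IOException: Filesystem closed` because the DFSClient behind that mini-cluster was already shut down, so the InvocationTargetException is logged and the loop simply retries, which is why the same two WAL paths keep reappearing. The sketch below is a minimal, hypothetical illustration of that poll-until-closed pattern, not HBase's actual RecoverLeaseFSUtils; the class and method names are invented for the example.

```java
// Hypothetical sketch of a poll-until-closed loop similar in spirit to the retries
// logged above; this is NOT the real org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class FileClosedPoller {

  /**
   * Polls the NameNode until it reports the file's last block as complete,
   * sleeping roughly one second between attempts (the cadence seen in the log).
   */
  public static boolean waitUntilClosed(DistributedFileSystem dfs, Path walFile,
      long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.isFileClosed(walFile)) {
          return true;                       // lease recovered, file is closed
        }
      } catch (IOException e) {
        // A closed DFSClient surfaces here as "Filesystem closed"; the real utility
        // logs a WARN like the ones above and tries again on the next iteration.
      }
      Thread.sleep(1000L);
    }
    return false;                            // caller decides how to handle the timeout
  }
}
```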
2024-11-20T14:49:04,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-20T14:49:04,694 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T14:49:04,694 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:49:04,696 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:49:04,696 DEBUG [Time-limited test {}] regionserver.HStore(1541): fce2782eceb464eedd1c21e095cb6144/info is initiating minor compaction (all files) 2024-11-20T14:49:04,696 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:49:04,696 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:04,696 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of fce2782eceb464eedd1c21e095cb6144/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:49:04,696 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/6e435ac4c0284a36955c61d126883a39, hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/dedec5fe34c44d1588727c67ed2536de, hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/5be4a1317a674917b7c770fc9dc9e17b] into tmpdir=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp, totalSize=17.7 K 2024-11-20T14:49:04,697 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 6e435ac4c0284a36955c61d126883a39, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732114114533 2024-11-20T14:49:04,697 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting dedec5fe34c44d1588727c67ed2536de, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732114124615 2024-11-20T14:49:04,697 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5be4a1317a674917b7c770fc9dc9e17b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732114134655 2024-11-20T14:49:04,710 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): fce2782eceb464eedd1c21e095cb6144#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:04,711 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/dba2d21fcfd24291a70f6837c640729b is 1080, key is row0001/info:/1732114114533/Put/seqid=0 2024-11-20T14:49:04,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741841_1017 (size=8296) 2024-11-20T14:49:04,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741841_1017 (size=8296) 2024-11-20T14:49:04,726 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/dba2d21fcfd24291a70f6837c640729b as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/dba2d21fcfd24291a70f6837c640729b 2024-11-20T14:49:04,733 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in fce2782eceb464eedd1c21e095cb6144/info of fce2782eceb464eedd1c21e095cb6144 into dba2d21fcfd24291a70f6837c640729b(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:49:04,733 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for fce2782eceb464eedd1c21e095cb6144: 2024-11-20T14:49:04,736 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C35523%2C1732114103314.1732114144736 2024-11-20T14:49:04,742 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:04,742 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:04,742 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:04,742 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:04,742 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:04,742 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114134656 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114144736 2024-11-20T14:49:04,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741839_1015 (size=2520) 2024-11-20T14:49:04,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741839_1015 (size=2520) 2024-11-20T14:49:04,747 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46327:46327),(127.0.0.1/127.0.0.1:37069:37069)] 2024-11-20T14:49:04,747 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114134656 is not closed yet, will try archiving it next time 2024-11-20T14:49:04,747 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114103933 to hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/oldWALs/1a15ecfd95f4%2C35523%2C1732114103314.1732114103933 2024-11-20T14:49:04,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:49:04,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:49:04,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-20T14:49:04,750 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-20T14:49:04,752 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T14:49:04,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T14:49:04,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-20T14:49:04,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
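The master-side entries just above show the procedure fan-out for the flush: the RPC handler stores pid=13 (FlushTableProcedure), PEWorker-3 walks it through FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS, and a single child (pid=14, FlushRegionProcedure) is dispatched to the region server, which runs it as a FlushRegionCallable. As a rough, hypothetical analogy using plain java.util.concurrent rather than HBase's ProcedureExecutor, a parent task that fans out one child per region and finishes only after every child reports back could look like this:

```java
// Simplified stand-in for a parent procedure that spawns one child per region and
// waits for all of them; names and structure are illustrative only.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public final class FlushTableTask {

  /** One unit of work per region, loosely analogous to a FlushRegionProcedure. */
  static Callable<Void> flushRegion(String regionName) {
    return () -> {
      // ... flush the region's memstore to a new store file ...
      System.out.println("flushed " + regionName);
      return null;
    };
  }

  /** Parent task: dispatch all children, then wait for each to complete. */
  public static void flushTable(List<String> regions) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, regions.size()));
    try {
      List<Future<Void>> children = new ArrayList<>();
      for (String region : regions) {
        children.add(pool.submit(flushRegion(region)));
      }
      for (Future<Void> child : children) {
        child.get();                 // parent finishes only when every child has finished
      }
    } finally {
      pool.shutdown();
    }
  }
}
```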
2024-11-20T14:49:04,906 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing fce2782eceb464eedd1c21e095cb6144 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T14:49:04,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/9115da44f8854cdf82149e422f327f6f is 1080, key is row0000/info:/1732114144735/Put/seqid=0 2024-11-20T14:49:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741843_1019 (size=6033) 2024-11-20T14:49:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741843_1019 (size=6033) 2024-11-20T14:49:04,916 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/9115da44f8854cdf82149e422f327f6f 2024-11-20T14:49:04,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/9115da44f8854cdf82149e422f327f6f as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/9115da44f8854cdf82149e422f327f6f 2024-11-20T14:49:04,930 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/9115da44f8854cdf82149e422f327f6f, entries=1, sequenceid=18, filesize=5.9 K 2024-11-20T14:49:04,931 INFO [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for fce2782eceb464eedd1c21e095cb6144 in 25ms, sequenceid=18, compaction requested=false 2024-11-20T14:49:04,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for fce2782eceb464eedd1c21e095cb6144: 2024-11-20T14:49:04,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
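On the region server, the flush above (and the minor compaction a few entries earlier) both use the write-then-commit pattern visible in the HRegionFileSystem "Committing ... as ..." lines: the new HFile is first written under the region's `.tmp` directory (`.tmp/info/9115da44...`) and, once complete, moved into the store directory (`info/9115da44...`) so readers never observe a partially written file. Below is a minimal sketch of that pattern with the Hadoop FileSystem API; the paths, file name, and payload are placeholders, not the test's actual data.

```java
// Minimal sketch, with placeholder paths, of writing a file under a temporary
// directory and committing it with a rename, as the flush/compaction entries above do.
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpThenCommit {

  public static void writeAndCommit(FileSystem fs, Path storeDir, String fileName,
      byte[] payload) throws Exception {
    Path tmpFile = new Path(new Path(storeDir, ".tmp"), fileName);
    Path finalFile = new Path(storeDir, fileName);

    // 1. Write the complete file under .tmp first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
    }

    // 2. Commit: move it into the store directory with a single rename.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IllegalStateException("Failed to commit " + tmpFile + " to " + finalFile);
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());   // local FS unless configured otherwise
    writeAndCommit(fs, new Path("/tmp/demo-store"), "example-hfile",
        "not a real HFile".getBytes(StandardCharsets.UTF_8));
  }
}
```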
2024-11-20T14:49:04,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-20T14:49:04,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-20T14:49:04,936 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-20T14:49:04,936 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-20T14:49:04,939 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-20T14:49:05,145 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114134656 to hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/oldWALs/1a15ecfd95f4%2C35523%2C1732114103314.1732114134656 2024-11-20T14:49:05,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:05,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:06,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:06,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:07,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:07,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:08,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:08,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:09,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:09,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:09,815 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fce2782eceb464eedd1c21e095cb6144, had cached 0 bytes from a total of 14329 2024-11-20T14:49:10,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:10,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:11,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:11,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:12,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:12,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:13,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:13,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:14,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:14,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:14,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-20T14:49:14,834 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-20T14:49:14,838 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C35523%2C1732114103314.1732114154838 2024-11-20T14:49:14,848 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:14,848 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:14,848 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:14,848 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:14,848 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:14,848 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114144736 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/WALs/1a15ecfd95f4,35523,1732114103314/1a15ecfd95f4%2C35523%2C1732114103314.1732114154838 2024-11-20T14:49:14,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741842_1018 (size=2026) 2024-11-20T14:49:14,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741842_1018 (size=2026) 2024-11-20T14:49:14,855 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46327:46327),(127.0.0.1/127.0.0.1:37069:37069)] 2024-11-20T14:49:14,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T14:49:14,856 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T14:49:14,856 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:49:14,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:49:14,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:49:14,856 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-20T14:49:14,856 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T14:49:14,856 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1758487653, stopped=false 2024-11-20T14:49:14,856 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a15ecfd95f4,35577,1732114103176 2024-11-20T14:49:14,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:49:14,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:49:14,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:14,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:14,881 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:49:14,881 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:49:14,881 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:49:14,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:49:14,882 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,35523,1732114103314' ***** 2024-11-20T14:49:14,882 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:49:14,882 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:49:14,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:49:14,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:49:14,882 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:49:14,882 INFO [RS:0;1a15ecfd95f4:35523 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T14:49:14,882 INFO [RS:0;1a15ecfd95f4:35523 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T14:49:14,882 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(3091): Received CLOSE for fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,35523,1732114103314 2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a15ecfd95f4:35523. 
2024-11-20T14:49:14,883 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fce2782eceb464eedd1c21e095cb6144, disabling compactions & flushes 2024-11-20T14:49:14,883 DEBUG [RS:0;1a15ecfd95f4:35523 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:49:14,883 DEBUG [RS:0;1a15ecfd95f4:35523 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:49:14,883 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:49:14,883 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T14:49:14,883 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. after waiting 0 ms 2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T14:49:14,883 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T14:49:14,883 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing fce2782eceb464eedd1c21e095cb6144 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-20T14:49:14,883 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-20T14:49:14,883 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1325): Online Regions={fce2782eceb464eedd1c21e095cb6144=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T14:49:14,883 DEBUG [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, fce2782eceb464eedd1c21e095cb6144 2024-11-20T14:49:14,884 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:49:14,884 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:49:14,884 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:49:14,884 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:49:14,884 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:49:14,884 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-20T14:49:14,888 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/93967dc235b341b1824f5d5f539f8c0d is 1080, key is row0001/info:/1732114154836/Put/seqid=0 2024-11-20T14:49:14,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741845_1021 (size=6033) 2024-11-20T14:49:14,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741845_1021 (size=6033) 2024-11-20T14:49:14,894 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/93967dc235b341b1824f5d5f539f8c0d 2024-11-20T14:49:14,901 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/.tmp/info/93967dc235b341b1824f5d5f539f8c0d as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/93967dc235b341b1824f5d5f539f8c0d 2024-11-20T14:49:14,903 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/info/899a0eec35c94e8fb31928fb696c8514 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144./info:regioninfo/1732114104834/Put/seqid=0 2024-11-20T14:49:14,907 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/93967dc235b341b1824f5d5f539f8c0d, entries=1, sequenceid=22, filesize=5.9 K 2024-11-20T14:49:14,908 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for fce2782eceb464eedd1c21e095cb6144 in 25ms, sequenceid=22, compaction requested=true 2024-11-20T14:49:14,910 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/6e435ac4c0284a36955c61d126883a39, hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/dedec5fe34c44d1588727c67ed2536de, hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/5be4a1317a674917b7c770fc9dc9e17b] to archive 2024-11-20T14:49:14,911 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T14:49:14,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741846_1022 (size=7308) 2024-11-20T14:49:14,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741846_1022 (size=7308) 2024-11-20T14:49:14,912 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/info/899a0eec35c94e8fb31928fb696c8514 2024-11-20T14:49:14,913 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/6e435ac4c0284a36955c61d126883a39 to hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/6e435ac4c0284a36955c61d126883a39 2024-11-20T14:49:14,914 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/dedec5fe34c44d1588727c67ed2536de to hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/dedec5fe34c44d1588727c67ed2536de 2024-11-20T14:49:14,915 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/5be4a1317a674917b7c770fc9dc9e17b to hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/info/5be4a1317a674917b7c770fc9dc9e17b 2024-11-20T14:49:14,915 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a15ecfd95f4:35577 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-20T14:49:14,916 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6e435ac4c0284a36955c61d126883a39=6033, dedec5fe34c44d1588727c67ed2536de=6033, 5be4a1317a674917b7c770fc9dc9e17b=6033] 2024-11-20T14:49:14,920 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/fce2782eceb464eedd1c21e095cb6144/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-20T14:49:14,920 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 2024-11-20T14:49:14,920 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fce2782eceb464eedd1c21e095cb6144: Waiting for close lock at 1732114154883Running coprocessor pre-close hooks at 1732114154883Disabling compacts and flushes for region at 1732114154883Disabling writes for close at 1732114154883Obtaining lock to block concurrent updates at 1732114154883Preparing flush snapshotting stores in fce2782eceb464eedd1c21e095cb6144 at 1732114154883Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732114154884 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. at 1732114154884Flushing fce2782eceb464eedd1c21e095cb6144/info: creating writer at 1732114154884Flushing fce2782eceb464eedd1c21e095cb6144/info: appending metadata at 1732114154888 (+4 ms)Flushing fce2782eceb464eedd1c21e095cb6144/info: closing flushed file at 1732114154888Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c65a3c6: reopening flushed file at 1732114154900 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for fce2782eceb464eedd1c21e095cb6144 in 25ms, sequenceid=22, compaction requested=true at 1732114154908 (+8 ms)Writing region close event to WAL at 1732114154916 (+8 ms)Running coprocessor post-close hooks at 1732114154920 (+4 ms)Closed at 1732114154920 2024-11-20T14:49:14,920 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732114104461.fce2782eceb464eedd1c21e095cb6144. 
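The "Region close journal" above is one concatenated string in which each phase ends with " at <epoch-ms>", optionally followed by the delta since the previous phase. A small stand-alone sketch (not HBase code) that recovers per-phase durations from such a journal, assuming only the format visible in the line above; the journal string here is an excerpt of the one in the log.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Turns a close-journal string into per-phase durations (next start minus this start).
public class CloseJournalTimings {
  private static final Pattern PHASE =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?");

  public static void main(String[] args) {
    String journal = "Waiting for close lock at 1732114154883"
        + "Disabling writes for close at 1732114154883"
        + "Writing region close event to WAL at 1732114154916 (+8 ms)"
        + "Closed at 1732114154920";
    List<String> names = new ArrayList<>();
    List<Long> starts = new ArrayList<>();
    Matcher m = PHASE.matcher(journal);
    while (m.find()) {
      names.add(m.group(1).trim());
      starts.add(Long.parseLong(m.group(2)));
    }
    for (int i = 0; i + 1 < names.size(); i++) {
      System.out.printf("%s: %d ms%n", names.get(i), starts.get(i + 1) - starts.get(i));
    }
  }
}
```

Run against the full journal above, this yields the same millisecond gaps that the journal already prints inline as "(+N ms)".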
2024-11-20T14:49:14,932 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/ns/f6d737ea57c04d80bea79346892b1141 is 43, key is default/ns:d/1732114104385/Put/seqid=0 2024-11-20T14:49:14,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741847_1023 (size=5153) 2024-11-20T14:49:14,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741847_1023 (size=5153) 2024-11-20T14:49:14,936 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/ns/f6d737ea57c04d80bea79346892b1141 2024-11-20T14:49:14,955 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/table/41963b1d5cda484c8c05ab493ae18d59 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732114104847/Put/seqid=0 2024-11-20T14:49:14,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741848_1024 (size=5508) 2024-11-20T14:49:14,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741848_1024 (size=5508) 2024-11-20T14:49:14,960 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/table/41963b1d5cda484c8c05ab493ae18d59 2024-11-20T14:49:14,966 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/info/899a0eec35c94e8fb31928fb696c8514 as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/info/899a0eec35c94e8fb31928fb696c8514 2024-11-20T14:49:14,972 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/info/899a0eec35c94e8fb31928fb696c8514, entries=10, sequenceid=11, filesize=7.1 K 2024-11-20T14:49:14,973 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/ns/f6d737ea57c04d80bea79346892b1141 as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/ns/f6d737ea57c04d80bea79346892b1141 2024-11-20T14:49:14,979 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/ns/f6d737ea57c04d80bea79346892b1141, entries=2, sequenceid=11, filesize=5.0 K 2024-11-20T14:49:14,980 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/.tmp/table/41963b1d5cda484c8c05ab493ae18d59 as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/table/41963b1d5cda484c8c05ab493ae18d59 2024-11-20T14:49:14,985 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/table/41963b1d5cda484c8c05ab493ae18d59, entries=2, sequenceid=11, filesize=5.4 K 2024-11-20T14:49:14,986 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false 2024-11-20T14:49:14,996 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-20T14:49:14,996 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:49:14,996 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:49:14,996 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114154883Running coprocessor pre-close hooks at 1732114154883Disabling compacts and flushes for region at 1732114154883Disabling writes for close at 1732114154884 (+1 ms)Obtaining lock to block concurrent updates at 1732114154884Preparing flush snapshotting stores in 1588230740 at 1732114154884Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732114154884Flushing stores of hbase:meta,,1.1588230740 at 1732114154885 (+1 ms)Flushing 1588230740/info: creating writer at 1732114154885Flushing 1588230740/info: appending metadata at 1732114154903 (+18 ms)Flushing 1588230740/info: closing flushed file at 1732114154903Flushing 1588230740/ns: creating writer at 1732114154917 (+14 ms)Flushing 1588230740/ns: appending metadata at 1732114154931 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732114154931Flushing 1588230740/table: creating writer at 1732114154941 (+10 ms)Flushing 1588230740/table: appending metadata at 1732114154954 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732114154954Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a02eda4: reopening flushed file at 1732114154965 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66c8deba: reopening flushed file at 1732114154972 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55962d0c: reopening flushed file at 1732114154979 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 102ms, sequenceid=11, compaction requested=false at 1732114154986 (+7 ms)Writing region close event to WAL at 1732114154987 (+1 ms)Running coprocessor post-close hooks at 1732114154996 (+9 ms)Closed at 1732114154996 2024-11-20T14:49:14,996 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T14:49:15,084 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,35523,1732114103314; all regions closed. 2024-11-20T14:49:15,084 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,084 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,084 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,084 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,085 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741834_1010 (size=3306) 2024-11-20T14:49:15,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741834_1010 (size=3306) 2024-11-20T14:49:15,089 DEBUG [RS:0;1a15ecfd95f4:35523 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/oldWALs 2024-11-20T14:49:15,089 INFO [RS:0;1a15ecfd95f4:35523 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C35523%2C1732114103314.meta:.meta(num 1732114104294) 2024-11-20T14:49:15,090 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,090 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,090 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,090 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,090 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741844_1020 (size=1252) 2024-11-20T14:49:15,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741844_1020 (size=1252) 2024-11-20T14:49:15,095 DEBUG [RS:0;1a15ecfd95f4:35523 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/oldWALs 2024-11-20T14:49:15,095 INFO [RS:0;1a15ecfd95f4:35523 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C35523%2C1732114103314:(num 1732114154838) 2024-11-20T14:49:15,095 DEBUG [RS:0;1a15ecfd95f4:35523 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:49:15,095 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:49:15,095 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:49:15,095 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T14:49:15,095 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:49:15,096 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T14:49:15,096 INFO [RS:0;1a15ecfd95f4:35523 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35523 2024-11-20T14:49:15,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,35523,1732114103314 2024-11-20T14:49:15,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:49:15,123 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:49:15,123 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,35523,1732114103314] 2024-11-20T14:49:15,139 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,35523,1732114103314 already deleted, retry=false 2024-11-20T14:49:15,139 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,35523,1732114103314 expired; onlineServers=0 2024-11-20T14:49:15,139 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a15ecfd95f4,35577,1732114103176' ***** 2024-11-20T14:49:15,139 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T14:49:15,139 INFO [M:0;1a15ecfd95f4:35577 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:49:15,139 INFO [M:0;1a15ecfd95f4:35577 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:49:15,139 DEBUG [M:0;1a15ecfd95f4:35577 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T14:49:15,139 DEBUG [M:0;1a15ecfd95f4:35577 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T14:49:15,139 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
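The NodeDeleted event for /hbase/rs/1a15ecfd95f4,35523,1732114103314 above is how the master learns the region server is gone: each live region server holds an ephemeral znode under /hbase/rs, and the master's RegionServerTracker watches that path. A minimal sketch that lists those registrations with the plain ZooKeeper client, assuming the quorum and base znode shown in the log (127.0.0.1:61696, /hbase); it is an illustration, not HBase internals.

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Lists the ephemeral region-server registrations under /hbase/rs.
public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61696", 30_000, (WatchedEvent e) -> {
      if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // Each child is "<host>,<port>,<startcode>", e.g. 1a15ecfd95f4,35523,1732114103314.
    List<String> servers = zk.getChildren("/hbase/rs", false);
    servers.forEach(System.out::println);
    zk.close();
  }
}
```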
2024-11-20T14:49:15,139 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114103646 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114103646,5,FailOnTimeoutGroup] 2024-11-20T14:49:15,139 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114103645 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114103645,5,FailOnTimeoutGroup] 2024-11-20T14:49:15,140 INFO [M:0;1a15ecfd95f4:35577 {}] hbase.ChoreService(370): Chore service for: master/1a15ecfd95f4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T14:49:15,140 INFO [M:0;1a15ecfd95f4:35577 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:49:15,140 DEBUG [M:0;1a15ecfd95f4:35577 {}] master.HMaster(1795): Stopping service threads 2024-11-20T14:49:15,140 INFO [M:0;1a15ecfd95f4:35577 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T14:49:15,140 INFO [M:0;1a15ecfd95f4:35577 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:49:15,140 INFO [M:0;1a15ecfd95f4:35577 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T14:49:15,140 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T14:49:15,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T14:49:15,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:15,147 DEBUG [M:0;1a15ecfd95f4:35577 {}] zookeeper.ZKUtil(347): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T14:49:15,148 WARN [M:0;1a15ecfd95f4:35577 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T14:49:15,148 INFO [M:0;1a15ecfd95f4:35577 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/.lastflushedseqids 2024-11-20T14:49:15,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741849_1025 (size=130) 2024-11-20T14:49:15,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741849_1025 (size=130) 2024-11-20T14:49:15,156 INFO [M:0;1a15ecfd95f4:35577 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T14:49:15,156 INFO [M:0;1a15ecfd95f4:35577 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T14:49:15,156 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:49:15,156 INFO [M:0;1a15ecfd95f4:35577 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:49:15,156 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:49:15,156 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:49:15,156 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:49:15,157 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-20T14:49:15,175 DEBUG [M:0;1a15ecfd95f4:35577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/97f8d9d0c2b64a05aba3b78f2c156fc4 is 82, key is hbase:meta,,1/info:regioninfo/1732114104333/Put/seqid=0 2024-11-20T14:49:15,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741850_1026 (size=5672) 2024-11-20T14:49:15,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741850_1026 (size=5672) 2024-11-20T14:49:15,180 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/97f8d9d0c2b64a05aba3b78f2c156fc4 2024-11-20T14:49:15,201 DEBUG [M:0;1a15ecfd95f4:35577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/391fdad7a8f7441aae5a53aeeb585eda is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732114104853/Put/seqid=0 2024-11-20T14:49:15,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741851_1027 (size=7825) 2024-11-20T14:49:15,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741851_1027 (size=7825) 2024-11-20T14:49:15,206 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/391fdad7a8f7441aae5a53aeeb585eda 2024-11-20T14:49:15,210 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 391fdad7a8f7441aae5a53aeeb585eda 2024-11-20T14:49:15,226 DEBUG [M:0;1a15ecfd95f4:35577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaa05d8e558144fc8e9a760bac58ab0a is 69, key is 1a15ecfd95f4,35523,1732114103314/rs:state/1732114103778/Put/seqid=0 
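The proc-family key a few lines above, \x00\x00\x00\x00\x00\x00\x00\x04/proc:d, has an 8-byte row key; reading it as a big-endian long gives 4, consistent with the master's local store keying its proc rows by procedure id (an inference from the key shape, not something the log states). A quick check:

```java
import org.apache.hadoop.hbase.util.Bytes;

// Decodes the 8-byte row key \x00\x00\x00\x00\x00\x00\x00\x04 seen in the proc family.
public class DecodeProcKey {
  public static void main(String[] args) {
    byte[] rowKey = {0, 0, 0, 0, 0, 0, 0, 4};
    System.out.println("row key as long = " + Bytes.toLong(rowKey)); // prints 4
  }
}
```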
2024-11-20T14:49:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741852_1028 (size=5156) 2024-11-20T14:49:15,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:49:15,231 INFO [RS:0;1a15ecfd95f4:35523 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:49:15,231 INFO [RS:0;1a15ecfd95f4:35523 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,35523,1732114103314; zookeeper connection closed. 2024-11-20T14:49:15,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35523-0x1015a014e370001, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:49:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741852_1028 (size=5156) 2024-11-20T14:49:15,231 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@40ccc26a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@40ccc26a 2024-11-20T14:49:15,231 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaa05d8e558144fc8e9a760bac58ab0a 2024-11-20T14:49:15,231 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T14:49:15,252 DEBUG [M:0;1a15ecfd95f4:35577 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3363d33a22004704b6562201af76ef40 is 52, key is load_balancer_on/state:d/1732114104457/Put/seqid=0 2024-11-20T14:49:15,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741853_1029 (size=5056) 2024-11-20T14:49:15,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741853_1029 (size=5056) 2024-11-20T14:49:15,258 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3363d33a22004704b6562201af76ef40 2024-11-20T14:49:15,263 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/97f8d9d0c2b64a05aba3b78f2c156fc4 as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/97f8d9d0c2b64a05aba3b78f2c156fc4 2024-11-20T14:49:15,269 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/97f8d9d0c2b64a05aba3b78f2c156fc4, entries=8, sequenceid=121, filesize=5.5 K 2024-11-20T14:49:15,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:15,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:15,270 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/391fdad7a8f7441aae5a53aeeb585eda as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/391fdad7a8f7441aae5a53aeeb585eda 2024-11-20T14:49:15,276 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 391fdad7a8f7441aae5a53aeeb585eda 2024-11-20T14:49:15,276 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/391fdad7a8f7441aae5a53aeeb585eda, entries=14, sequenceid=121, filesize=7.6 K 2024-11-20T14:49:15,277 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaa05d8e558144fc8e9a760bac58ab0a as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aaa05d8e558144fc8e9a760bac58ab0a 2024-11-20T14:49:15,283 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aaa05d8e558144fc8e9a760bac58ab0a, entries=1, sequenceid=121, filesize=5.0 K 2024-11-20T14:49:15,285 DEBUG 
[M:0;1a15ecfd95f4:35577 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3363d33a22004704b6562201af76ef40 as hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3363d33a22004704b6562201af76ef40 2024-11-20T14:49:15,290 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34069/user/jenkins/test-data/4ae10758-484b-c00d-b75e-060554f89ec7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3363d33a22004704b6562201af76ef40, entries=1, sequenceid=121, filesize=4.9 K 2024-11-20T14:49:15,292 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false 2024-11-20T14:49:15,293 INFO [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:49:15,293 DEBUG [M:0;1a15ecfd95f4:35577 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114155156Disabling compacts and flushes for region at 1732114155156Disabling writes for close at 1732114155156Obtaining lock to block concurrent updates at 1732114155157 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732114155157Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44659, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1732114155157Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732114155158 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732114155158Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732114155174 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732114155174Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732114155185 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732114155200 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732114155200Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732114155210 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732114155226 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732114155226Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732114155236 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732114155252 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732114155252Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7451270: reopening flushed file at 1732114155262 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9927c0c: reopening flushed file at 1732114155269 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@277e745a: reopening flushed file at 1732114155276 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39c7224: reopening flushed file at 1732114155284 (+8 ms)Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false at 1732114155292 (+8 ms)Writing region close event to WAL at 1732114155293 (+1 ms)Closed at 1732114155293 2024-11-20T14:49:15,294 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,294 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,294 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,294 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,294 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:49:15,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741830_1006 (size=53056) 2024-11-20T14:49:15,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40207 is added to blk_1073741830_1006 (size=53056) 2024-11-20T14:49:15,297 INFO [M:0;1a15ecfd95f4:35577 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T14:49:15,297 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:49:15,297 INFO [M:0;1a15ecfd95f4:35577 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35577 2024-11-20T14:49:15,297 INFO [M:0;1a15ecfd95f4:35577 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:49:15,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:49:15,429 INFO [M:0;1a15ecfd95f4:35577 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:49:15,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35577-0x1015a014e370000, quorum=127.0.0.1:61696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:49:15,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35649fe3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:49:15,432 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@453c7069{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:49:15,432 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:49:15,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@218a8176{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:49:15,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1188926{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir/,STOPPED} 2024-11-20T14:49:15,434 WARN [BP-1701722610-172.17.0.2-1732114101473 heartbeating to localhost/127.0.0.1:34069 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:49:15,434 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:49:15,434 WARN [BP-1701722610-172.17.0.2-1732114101473 heartbeating to localhost/127.0.0.1:34069 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1701722610-172.17.0.2-1732114101473 (Datanode Uuid 479c7090-a5cf-42bf-9dc2-c6767d9621bf) service to localhost/127.0.0.1:34069 2024-11-20T14:49:15,434 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:49:15,435 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data3/current/BP-1701722610-172.17.0.2-1732114101473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:49:15,435 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data4/current/BP-1701722610-172.17.0.2-1732114101473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:49:15,436 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:49:15,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76c42846{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:49:15,438 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6bc13808{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:49:15,438 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:49:15,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5829df96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:49:15,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3721e694{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir/,STOPPED} 2024-11-20T14:49:15,440 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
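A few records below, the test's resource checker reports the thread count after the run (Thread=205, was 179) and dumps each "Potentially hanging thread" with its stack frames. A minimal sketch of taking such a snapshot with the standard JDK API; this is not the HBase ResourceChecker itself, only the same idea.

```java
import java.util.Map;

// Snapshots all live threads and prints the top frames of each stack.
public class ThreadSnapshot {
  public static void main(String[] args) {
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    System.out.println("Thread=" + stacks.size());
    for (Map.Entry<Thread, StackTraceElement[]> entry : stacks.entrySet()) {
      System.out.println("Potentially hanging thread: " + entry.getKey().getName());
      StackTraceElement[] frames = entry.getValue();
      for (int i = 0; i < Math.min(3, frames.length); i++) {
        System.out.println("    " + frames[i]);
      }
    }
  }
}
```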
2024-11-20T14:49:15,440 WARN [BP-1701722610-172.17.0.2-1732114101473 heartbeating to localhost/127.0.0.1:34069 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:49:15,440 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:49:15,440 WARN [BP-1701722610-172.17.0.2-1732114101473 heartbeating to localhost/127.0.0.1:34069 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1701722610-172.17.0.2-1732114101473 (Datanode Uuid c11b77a8-d22f-4dbe-8f83-4932c6eb8c12) service to localhost/127.0.0.1:34069 2024-11-20T14:49:15,441 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data1/current/BP-1701722610-172.17.0.2-1732114101473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:49:15,441 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/cluster_ef5bd88b-3bb3-ec8a-0f60-0e3d0312f1f1/data/data2/current/BP-1701722610-172.17.0.2-1732114101473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:49:15,441 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:49:15,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3962278a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:49:15,449 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c518279{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:49:15,449 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:49:15,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48497d3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:49:15,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f0cf80d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir/,STOPPED} 2024-11-20T14:49:15,455 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T14:49:15,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T14:49:15,482 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34069 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34069 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34069 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:34069 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34069 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/1a15ecfd95f4:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=195 (was 258), ProcessCount=11 (was 11), AvailableMemoryMB=9380 (was 9579) 2024-11-20T14:49:15,489 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=195, ProcessCount=11, AvailableMemoryMB=9380 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.log.dir so I do NOT create it in target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/347a1eb6-f706-2df3-2336-66a20d107354/hadoop.tmp.dir so I do NOT create it in target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef, deleteOnExit=true 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/test.cache.data in system properties and HBase conf 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir in system properties and HBase conf 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T14:49:15,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T14:49:15,491 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T14:49:15,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/nfs.dump.dir in system properties and HBase conf 2024-11-20T14:49:15,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/java.io.tmpdir in system properties and HBase conf 2024-11-20T14:49:15,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:49:15,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T14:49:15,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T14:49:15,504 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:49:15,788 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:49:15,793 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:49:15,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:49:15,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:49:15,794 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:49:15,795 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:49:15,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f3cebf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:49:15,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7319bc41{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:49:15,797 INFO [regionserver/1a15ecfd95f4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:49:15,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c994a01{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/java.io.tmpdir/jetty-localhost-38777-hadoop-hdfs-3_4_1-tests_jar-_-any-6503542299689431812/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:49:15,898 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19e3a83f{HTTP/1.1, (http/1.1)}{localhost:38777} 2024-11-20T14:49:15,898 INFO [Time-limited test {}] server.Server(415): Started @241098ms 2024-11-20T14:49:15,911 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:49:16,158 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:49:16,161 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:49:16,161 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:49:16,161 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:49:16,162 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:49:16,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bbefcfb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:49:16,163 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3480767c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:49:16,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@303a5a71{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/java.io.tmpdir/jetty-localhost-44051-hadoop-hdfs-3_4_1-tests_jar-_-any-9815158682514937712/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:49:16,266 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a5f8367{HTTP/1.1, (http/1.1)}{localhost:44051} 2024-11-20T14:49:16,266 INFO [Time-limited test {}] server.Server(415): Started @241465ms 2024-11-20T14:49:16,267 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:49:16,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:16,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:16,296 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:49:16,298 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:49:16,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:49:16,299 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:49:16,299 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:49:16,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73b2b928{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:49:16,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7dd0dd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:49:16,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76d165ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/java.io.tmpdir/jetty-localhost-39877-hadoop-hdfs-3_4_1-tests_jar-_-any-7992316419564200177/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:49:16,403 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3ed34521{HTTP/1.1, (http/1.1)}{localhost:39877} 2024-11-20T14:49:16,403 INFO [Time-limited test {}] server.Server(415): Started @241602ms 2024-11-20T14:49:16,404 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:49:17,116 WARN [Thread-1962 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data2/current/BP-190161143-172.17.0.2-1732114155508/current, will proceed with Du for space computation calculation, 2024-11-20T14:49:17,116 WARN [Thread-1961 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data1/current/BP-190161143-172.17.0.2-1732114155508/current, will proceed with Du for space computation calculation, 2024-11-20T14:49:17,133 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:49:17,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fda622f734b75bc with lease ID 0x901f9274497c1bd1: Processing first storage report for DS-2568e69a-ebcd-40e6-aee7-658e7d2a7711 from datanode DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1ef7c392-9054-4371-a123-e78abbe43054, infoPort=33067, infoSecurePort=0, ipcPort=45839, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508) 2024-11-20T14:49:17,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fda622f734b75bc with lease ID 0x901f9274497c1bd1: from storage DS-2568e69a-ebcd-40e6-aee7-658e7d2a7711 node DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1ef7c392-9054-4371-a123-e78abbe43054, infoPort=33067, infoSecurePort=0, ipcPort=45839, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:49:17,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fda622f734b75bc with lease ID 0x901f9274497c1bd1: Processing first storage report for DS-d7110042-2c76-42df-99a7-30db20d6b742 from datanode DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1ef7c392-9054-4371-a123-e78abbe43054, infoPort=33067, infoSecurePort=0, ipcPort=45839, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508) 2024-11-20T14:49:17,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fda622f734b75bc with lease ID 0x901f9274497c1bd1: from storage DS-d7110042-2c76-42df-99a7-30db20d6b742 node DatanodeRegistration(127.0.0.1:42195, datanodeUuid=1ef7c392-9054-4371-a123-e78abbe43054, infoPort=33067, infoSecurePort=0, ipcPort=45839, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:49:17,217 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data3/current/BP-190161143-172.17.0.2-1732114155508/current, will proceed with Du for space computation calculation, 2024-11-20T14:49:17,218 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data4/current/BP-190161143-172.17.0.2-1732114155508/current, will proceed with Du for space computation calculation, 2024-11-20T14:49:17,233 WARN [Thread-1948 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-20T14:49:17,235 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4993f15a6909e4c with lease ID 0x901f9274497c1bd2: Processing first storage report for DS-356225b5-645b-4526-8397-8cc81106e8af from datanode DatanodeRegistration(127.0.0.1:36291, datanodeUuid=ec2ef208-900f-42d5-b516-fc86d7ab2a11, infoPort=35095, infoSecurePort=0, ipcPort=45983, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508) 2024-11-20T14:49:17,235 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4993f15a6909e4c with lease ID 0x901f9274497c1bd2: from storage DS-356225b5-645b-4526-8397-8cc81106e8af node DatanodeRegistration(127.0.0.1:36291, datanodeUuid=ec2ef208-900f-42d5-b516-fc86d7ab2a11, infoPort=35095, infoSecurePort=0, ipcPort=45983, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:49:17,235 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4993f15a6909e4c with lease ID 0x901f9274497c1bd2: Processing first storage report for DS-6f765725-399f-4777-a871-1d08f53cac13 from datanode DatanodeRegistration(127.0.0.1:36291, datanodeUuid=ec2ef208-900f-42d5-b516-fc86d7ab2a11, infoPort=35095, infoSecurePort=0, ipcPort=45983, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508) 2024-11-20T14:49:17,235 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4993f15a6909e4c with lease ID 0x901f9274497c1bd2: from storage DS-6f765725-399f-4777-a871-1d08f53cac13 node DatanodeRegistration(127.0.0.1:36291, datanodeUuid=ec2ef208-900f-42d5-b516-fc86d7ab2a11, infoPort=35095, infoSecurePort=0, ipcPort=45983, storageInfo=lv=-57;cid=testClusterID;nsid=1792355333;c=1732114155508), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:49:17,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:17,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:17,335 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f 2024-11-20T14:49:17,340 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/zookeeper_0, clientPort=49307, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T14:49:17,341 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49307 2024-11-20T14:49:17,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:17,343 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:17,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:49:17,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:49:17,363 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a with version=8 2024-11-20T14:49:17,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase-staging 2024-11-20T14:49:17,365 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:49:17,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:49:17,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:49:17,365 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:49:17,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:49:17,365 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:49:17,365 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T14:49:17,365 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:49:17,366 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35909 2024-11-20T14:49:17,367 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35909 connecting to ZooKeeper ensemble=127.0.0.1:49307 2024-11-20T14:49:17,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359090x0, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:49:17,426 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35909-0x1015a0221df0000 connected 2024-11-20T14:49:17,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:17,523 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:17,526 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:49:17,526 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a, hbase.cluster.distributed=false 2024-11-20T14:49:17,529 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:49:17,530 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35909 2024-11-20T14:49:17,530 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35909 2024-11-20T14:49:17,531 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35909 2024-11-20T14:49:17,531 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35909 2024-11-20T14:49:17,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35909 2024-11-20T14:49:17,548 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:49:17,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:49:17,548 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:49:17,548 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:49:17,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:49:17,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:49:17,548 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:49:17,549 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:49:17,549 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38145 2024-11-20T14:49:17,551 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38145 connecting to ZooKeeper ensemble=127.0.0.1:49307 2024-11-20T14:49:17,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:17,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381450x0, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:49:17,562 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:381450x0, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:49:17,562 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38145-0x1015a0221df0001 connected 2024-11-20T14:49:17,563 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:49:17,563 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:49:17,564 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T14:49:17,564 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:49:17,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38145 2024-11-20T14:49:17,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, 
numCallQueues=1, port=38145 2024-11-20T14:49:17,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38145 2024-11-20T14:49:17,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38145 2024-11-20T14:49:17,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38145 2024-11-20T14:49:17,581 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a15ecfd95f4:35909 2024-11-20T14:49:17,581 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:17,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:49:17,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:49:17,592 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:17,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T14:49:17,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,604 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:49:17,604 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a15ecfd95f4,35909,1732114157364 from backup master directory 2024-11-20T14:49:17,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:17,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:49:17,612 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will 
not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:49:17,612 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:17,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:49:17,617 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/hbase.id] with ID: 99a9e3d2-8f8e-4641-aa11-8c4e135cc9be 2024-11-20T14:49:17,617 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/.tmp/hbase.id 2024-11-20T14:49:17,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:49:17,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:49:17,624 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/.tmp/hbase.id]:[hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/hbase.id] 2024-11-20T14:49:17,634 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:17,634 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T14:49:17,635 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-20T14:49:17,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:49:17,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:49:17,652 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:49:17,653 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T14:49:17,653 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:49:17,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:49:17,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:49:17,661 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store 2024-11-20T14:49:17,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:49:17,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:49:17,667 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:17,667 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:49:17,668 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:49:17,668 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:49:17,668 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:49:17,668 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:49:17,668 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T14:49:17,668 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114157667Disabling compacts and flushes for region at 1732114157667Disabling writes for close at 1732114157668 (+1 ms)Writing region close event to WAL at 1732114157668Closed at 1732114157668 2024-11-20T14:49:17,669 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/.initializing 2024-11-20T14:49:17,669 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/WALs/1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:17,671 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C35909%2C1732114157364, suffix=, logDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/WALs/1a15ecfd95f4,35909,1732114157364, archiveDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/oldWALs, maxLogs=10 2024-11-20T14:49:17,672 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C35909%2C1732114157364.1732114157672 2024-11-20T14:49:17,677 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/WALs/1a15ecfd95f4,35909,1732114157364/1a15ecfd95f4%2C35909%2C1732114157364.1732114157672 2024-11-20T14:49:17,678 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35095:35095),(127.0.0.1/127.0.0.1:33067:33067)] 2024-11-20T14:49:17,678 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:49:17,678 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:17,678 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,678 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,691 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T14:49:17,692 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:17,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T14:49:17,694 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:49:17,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T14:49:17,696 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:49:17,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,697 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T14:49:17,697 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:49:17,698 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,699 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,699 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,701 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,701 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,701 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T14:49:17,702 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:49:17,705 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:49:17,705 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740175, jitterRate=-0.05881945788860321}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T14:49:17,706 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732114157679Initializing all the Stores at 1732114157679Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114157679Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114157690 (+11 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114157690Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114157690Cleaning up temporary data from old regions at 1732114157701 (+11 ms)Region opened successfully at 1732114157706 (+5 ms) 2024-11-20T14:49:17,706 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T14:49:17,709 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@144790f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:49:17,710 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T14:49:17,711 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T14:49:17,711 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T14:49:17,711 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T14:49:17,711 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T14:49:17,712 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T14:49:17,712 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T14:49:17,714 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T14:49:17,715 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T14:49:17,737 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T14:49:17,737 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T14:49:17,738 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T14:49:17,745 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T14:49:17,746 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T14:49:17,746 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T14:49:17,754 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T14:49:17,755 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T14:49:17,762 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T14:49:17,764 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T14:49:17,770 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T14:49:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:49:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:49:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,780 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a15ecfd95f4,35909,1732114157364, sessionid=0x1015a0221df0000, setting cluster-up flag (Was=false) 2024-11-20T14:49:17,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,845 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T14:49:17,846 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:17,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:17,887 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T14:49:17,888 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:17,889 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T14:49:17,890 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T14:49:17,891 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T14:49:17,891 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T14:49:17,891 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a15ecfd95f4,35909,1732114157364 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a15ecfd95f4:0, corePoolSize=10, maxPoolSize=10 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:49:17,892 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:49:17,895 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732114187895 2024-11-20T14:49:17,895 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T14:49:17,895 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T14:49:17,895 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T14:49:17,895 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T14:49:17,896 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T14:49:17,896 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T14:49:17,896 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:49:17,896 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T14:49:17,896 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-20T14:49:17,896 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T14:49:17,896 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T14:49:17,897 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T14:49:17,897 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,897 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T14:49:17,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T14:49:17,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T14:49:17,898 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114157898,5,FailOnTimeoutGroup] 2024-11-20T14:49:17,898 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114157898,5,FailOnTimeoutGroup] 2024-11-20T14:49:17,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:17,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T14:49:17,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:17,898 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:17,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:49:17,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:49:17,904 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T14:49:17,904 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a 2024-11-20T14:49:17,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:49:17,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:49:17,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:17,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:49:17,912 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:49:17,912 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:17,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:49:17,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:49:17,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:17,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:49:17,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:49:17,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:17,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:49:17,918 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:49:17,918 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:17,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:17,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:49:17,919 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740 2024-11-20T14:49:17,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740 2024-11-20T14:49:17,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:49:17,921 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:49:17,921 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T14:49:17,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:49:17,925 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:49:17,925 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699984, jitterRate=-0.10992434620857239}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:49:17,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732114157910Initializing all the Stores at 1732114157910Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114157910Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114157911 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114157911Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114157911Cleaning up temporary data from old regions at 1732114157921 (+10 ms)Region opened successfully at 1732114157926 (+5 ms) 2024-11-20T14:49:17,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:49:17,926 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:49:17,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:49:17,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:49:17,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:49:17,926 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:49:17,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114157926Disabling compacts and flushes for region at 
1732114157926Disabling writes for close at 1732114157926Writing region close event to WAL at 1732114157926Closed at 1732114157926 2024-11-20T14:49:17,928 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:49:17,928 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T14:49:17,928 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T14:49:17,929 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:49:17,930 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T14:49:17,968 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(746): ClusterId : 99a9e3d2-8f8e-4641-aa11-8c4e135cc9be 2024-11-20T14:49:17,968 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:49:17,993 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:49:17,993 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:49:18,013 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:49:18,013 DEBUG [RS:0;1a15ecfd95f4:38145 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24cd27c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:49:18,026 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a15ecfd95f4:38145 2024-11-20T14:49:18,026 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:49:18,026 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:49:18,026 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T14:49:18,027 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,35909,1732114157364 with port=38145, startcode=1732114157548 2024-11-20T14:49:18,027 DEBUG [RS:0;1a15ecfd95f4:38145 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:49:18,029 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54103, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:49:18,030 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35909 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,030 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35909 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,031 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a 2024-11-20T14:49:18,031 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34623 2024-11-20T14:49:18,031 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:49:18,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:49:18,043 DEBUG [RS:0;1a15ecfd95f4:38145 {}] zookeeper.ZKUtil(111): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,043 WARN [RS:0;1a15ecfd95f4:38145 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:49:18,043 INFO [RS:0;1a15ecfd95f4:38145 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:49:18,043 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,043 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,38145,1732114157548] 2024-11-20T14:49:18,046 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:49:18,048 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:49:18,048 INFO [RS:0;1a15ecfd95f4:38145 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:49:18,048 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-20T14:49:18,048 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:49:18,049 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:49:18,049 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,049 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,050 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,050 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,050 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:49:18,050 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:49:18,050 DEBUG [RS:0;1a15ecfd95f4:38145 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:49:18,050 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T14:49:18,050 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,050 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,050 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,050 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,050 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38145,1732114157548-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:49:18,067 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:49:18,067 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,38145,1732114157548-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,067 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,067 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.Replication(171): 1a15ecfd95f4,38145,1732114157548 started 2024-11-20T14:49:18,081 WARN [1a15ecfd95f4:35909 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-20T14:49:18,083 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
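The ChoreService/ScheduledChore entries above all follow the same pattern: a named periodic task with a period and time unit is scheduled on the region server's chore service. Below is a minimal sketch of that pattern; the class and chore names are illustrative and not taken from the test, and the assumption is only that scheduling a chore produces the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines seen here.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Stoppable lets the owner tell the chore to stop ticking on shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // A 1000 ms period, like the CompactionChecker/MemstoreFlusherChore entries above.
    ScheduledChore tick = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(tick); // the chore service reports the chore as enabled here
    Thread.sleep(3000);
    service.shutdown();
  }
}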
2024-11-20T14:49:18,083 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,38145,1732114157548, RpcServer on 1a15ecfd95f4/172.17.0.2:38145, sessionid=0x1015a0221df0001 2024-11-20T14:49:18,083 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:49:18,083 DEBUG [RS:0;1a15ecfd95f4:38145 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,083 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,38145,1732114157548' 2024-11-20T14:49:18,083 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:49:18,084 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:49:18,084 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:49:18,084 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:49:18,084 DEBUG [RS:0;1a15ecfd95f4:38145 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,084 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,38145,1732114157548' 2024-11-20T14:49:18,084 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:49:18,084 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:49:18,085 DEBUG [RS:0;1a15ecfd95f4:38145 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:49:18,085 INFO [RS:0;1a15ecfd95f4:38145 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:49:18,085 INFO [RS:0;1a15ecfd95f4:38145 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
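The flush-table-proc and online-snapshot managers above coordinate through ZooKeeper barrier znodes: members check the .../abort node and watch the .../acquired node for new procedures. A minimal sketch of such a child watch with the plain ZooKeeper client follows; the quorum string and znode path are copied from the log, but this is illustrative only and not the ZKProcedureMemberRpcs implementation.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class AcquiredZNodeWatch {
  public static void main(String[] args) throws Exception {
    // Connection string matches the quorum shown in the log; session timeout is arbitrary.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49307", 30_000,
        (WatchedEvent event) ->
            System.out.println("ZK event: " + event.getType() + " on " + event.getPath()));
    // 'true' registers the default watcher: a NodeChildrenChanged event fires
    // when a new procedure child appears under the acquired barrier.
    List<String> pending = zk.getChildren("/hbase/flush-table-proc/acquired", true);
    System.out.println("pending procedures: " + pending);
    zk.close();
  }
}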
2024-11-20T14:49:18,186 INFO [RS:0;1a15ecfd95f4:38145 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C38145%2C1732114157548, suffix=, logDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548, archiveDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/oldWALs, maxLogs=32 2024-11-20T14:49:18,187 INFO [RS:0;1a15ecfd95f4:38145 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38145%2C1732114157548.1732114158187 2024-11-20T14:49:18,192 INFO [RS:0;1a15ecfd95f4:38145 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114158187 2024-11-20T14:49:18,192 DEBUG [RS:0;1a15ecfd95f4:38145 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35095:35095),(127.0.0.1/127.0.0.1:33067:33067)] 2024-11-20T14:49:18,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:18,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:18,331 DEBUG [1a15ecfd95f4:35909 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T14:49:18,332 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,333 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,38145,1732114157548, state=OPENING 2024-11-20T14:49:18,362 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T14:49:18,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:18,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:49:18,371 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:49:18,371 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:49:18,371 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:49:18,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,38145,1732114157548}] 2024-11-20T14:49:18,524 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T14:49:18,527 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47185, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T14:49:18,531 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T14:49:18,531 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:49:18,533 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C38145%2C1732114157548.meta, suffix=.meta, logDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548, archiveDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/oldWALs, maxLogs=32 2024-11-20T14:49:18,534 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38145%2C1732114157548.meta.1732114158533.meta 2024-11-20T14:49:18,541 INFO 
[RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.meta.1732114158533.meta 2024-11-20T14:49:18,542 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33067:33067),(127.0.0.1/127.0.0.1:35095:35095)] 2024-11-20T14:49:18,543 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:49:18,543 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T14:49:18,543 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T14:49:18,543 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T14:49:18,544 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T14:49:18,544 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:18,544 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T14:49:18,544 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T14:49:18,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:49:18,546 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:49:18,546 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:18,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:18,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:49:18,548 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:49:18,548 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:18,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:18,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:49:18,549 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:49:18,549 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:18,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:18,550 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:49:18,551 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:49:18,551 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:18,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:49:18,551 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:49:18,552 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740 2024-11-20T14:49:18,553 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740 2024-11-20T14:49:18,554 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:49:18,555 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:49:18,555 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-20T14:49:18,557 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:49:18,557 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789526, jitterRate=0.003934323787689209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:49:18,558 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T14:49:18,558 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732114158544Writing region info on filesystem at 1732114158544Initializing all the Stores at 1732114158545 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114158545Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114158545Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114158545Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114158545Cleaning up temporary data from old regions at 1732114158555 (+10 ms)Running coprocessor post-open hooks at 1732114158558 (+3 ms)Region opened successfully at 1732114158558 2024-11-20T14:49:18,559 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732114158524 2024-11-20T14:49:18,561 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T14:49:18,562 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T14:49:18,562 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,563 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,38145,1732114157548, state=OPEN 2024-11-20T14:49:18,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:49:18,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:49:18,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:49:18,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:49:18,617 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T14:49:18,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,38145,1732114157548 in 246 msec 2024-11-20T14:49:18,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T14:49:18,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 694 msec 2024-11-20T14:49:18,627 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:49:18,627 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T14:49:18,628 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:49:18,628 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,38145,1732114157548, seqNum=-1] 2024-11-20T14:49:18,629 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:49:18,630 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49847, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:49:18,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 745 msec 2024-11-20T14:49:18,636 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732114158636, completionTime=-1 2024-11-20T14:49:18,636 INFO 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T14:49:18,636 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-20T14:49:18,638 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-20T14:49:18,638 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732114218638 2024-11-20T14:49:18,638 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732114278638 2024-11-20T14:49:18,638 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-20T14:49:18,639 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35909,1732114157364-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,639 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35909,1732114157364-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,639 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35909,1732114157364-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,639 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a15ecfd95f4:35909, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,639 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,639 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,641 DEBUG [master/1a15ecfd95f4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.031sec 2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35909,1732114157364-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:49:18,643 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35909,1732114157364-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T14:49:18,645 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T14:49:18,646 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T14:49:18,646 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,35909,1732114157364-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:49:18,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d3cbc04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:49:18,668 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a15ecfd95f4,35909,-1 for getting cluster id 2024-11-20T14:49:18,668 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T14:49:18,669 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '99a9e3d2-8f8e-4641-aa11-8c4e135cc9be' 2024-11-20T14:49:18,670 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T14:49:18,670 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "99a9e3d2-8f8e-4641-aa11-8c4e135cc9be" 2024-11-20T14:49:18,670 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2237d2f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:49:18,670 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a15ecfd95f4,35909,-1] 2024-11-20T14:49:18,670 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T14:49:18,671 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:49:18,672 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T14:49:18,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@773adf7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:49:18,673 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:49:18,674 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,38145,1732114157548, seqNum=-1] 2024-11-20T14:49:18,674 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:49:18,675 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:49:18,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:18,677 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:49:18,680 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T14:49:18,680 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-20T14:49:18,681 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a15ecfd95f4,35909,1732114157364 2024-11-20T14:49:18,681 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7a2edca0 2024-11-20T14:49:18,682 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T14:49:18,683 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T14:49:18,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35909 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-20T14:49:18,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35909 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
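The two TableDescriptorChecker warnings above fire because the test deliberately shrinks the region max file size and memstore flush size so that flushes and log rolls trigger quickly. A minimal sketch of setting those two properties is shown below; the property names and values are the ones quoted in the warnings (786432 and 8192), and this is not the test's actual setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror the ones reported in the warnings above.
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB; provokes frequent splits
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB; provokes frequent flushes
    return conf;
  }
}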
2024-11-20T14:49:18,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35909 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:49:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35909 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-20T14:49:18,687 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T14:49:18,687 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:18,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35909 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-20T14:49:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35909 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:49:18,689 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T14:49:18,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741835_1011 (size=381) 2024-11-20T14:49:18,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741835_1011 (size=381) 2024-11-20T14:49:18,698 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c651af5dc8a126e2f59db5888a2c5820, NAME => 'TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a 2024-11-20T14:49:18,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741836_1012 (size=64) 2024-11-20T14:49:18,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741836_1012 (size=64) 2024-11-20T14:49:18,704 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:18,705 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing c651af5dc8a126e2f59db5888a2c5820, disabling compactions & flushes 2024-11-20T14:49:18,705 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:18,705 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:18,705 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. after waiting 0 ms 2024-11-20T14:49:18,705 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:18,705 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:18,705 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c651af5dc8a126e2f59db5888a2c5820: Waiting for close lock at 1732114158705Disabling compacts and flushes for region at 1732114158705Disabling writes for close at 1732114158705Writing region close event to WAL at 1732114158705Closed at 1732114158705 2024-11-20T14:49:18,706 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T14:49:18,707 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732114158706"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732114158706"}]},"ts":"1732114158706"} 2024-11-20T14:49:18,709 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
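The create request logged above specifies a single 'info' family with VERSIONS => '1' and BLOOMFILTER => 'ROW'. A minimal sketch of building and submitting an equivalent descriptor with the standard HBase client builders follows; connection and Admin handling are assumed and omitted, and this is not the test's own code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void createLogRollingTable(Admin admin) throws IOException {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .build())
        .build();
    // Submitting the descriptor drives the CreateTableProcedure states seen above.
    admin.createTable(td);
  }
}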
2024-11-20T14:49:18,710 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T14:49:18,710 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114158710"}]},"ts":"1732114158710"} 2024-11-20T14:49:18,713 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-20T14:49:18,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, ASSIGN}] 2024-11-20T14:49:18,715 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, ASSIGN 2024-11-20T14:49:18,716 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, ASSIGN; state=OFFLINE, location=1a15ecfd95f4,38145,1732114157548; forceNewPlan=false, retain=false 2024-11-20T14:49:18,866 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c651af5dc8a126e2f59db5888a2c5820, regionState=OPENING, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:18,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, ASSIGN because future has completed 2024-11-20T14:49:18,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c651af5dc8a126e2f59db5888a2c5820, server=1a15ecfd95f4,38145,1732114157548}] 2024-11-20T14:49:19,029 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 
2024-11-20T14:49:19,029 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c651af5dc8a126e2f59db5888a2c5820, NAME => 'TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:49:19,029 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,030 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:19,030 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,030 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,031 INFO [StoreOpener-c651af5dc8a126e2f59db5888a2c5820-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,033 INFO [StoreOpener-c651af5dc8a126e2f59db5888a2c5820-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c651af5dc8a126e2f59db5888a2c5820 columnFamilyName info 2024-11-20T14:49:19,034 DEBUG [StoreOpener-c651af5dc8a126e2f59db5888a2c5820-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:19,034 INFO [StoreOpener-c651af5dc8a126e2f59db5888a2c5820-1 {}] regionserver.HStore(327): Store=c651af5dc8a126e2f59db5888a2c5820/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:49:19,034 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,035 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,036 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,036 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,036 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,038 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,041 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:49:19,041 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c651af5dc8a126e2f59db5888a2c5820; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696261, jitterRate=-0.11465892195701599}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T14:49:19,041 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:19,042 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c651af5dc8a126e2f59db5888a2c5820: Running coprocessor pre-open hook at 1732114159030Writing region info on filesystem at 1732114159030Initializing all the Stores at 1732114159031 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114159031Cleaning up temporary data from old regions at 1732114159036 (+5 ms)Running coprocessor post-open hooks at 1732114159041 (+5 ms)Region opened successfully at 1732114159042 (+1 ms) 2024-11-20T14:49:19,043 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., pid=6, masterSystemTime=1732114159024 2024-11-20T14:49:19,045 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 
2024-11-20T14:49:19,045 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:19,046 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c651af5dc8a126e2f59db5888a2c5820, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:19,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c651af5dc8a126e2f59db5888a2c5820, server=1a15ecfd95f4,38145,1732114157548 because future has completed 2024-11-20T14:49:19,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T14:49:19,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c651af5dc8a126e2f59db5888a2c5820, server=1a15ecfd95f4,38145,1732114157548 in 179 msec 2024-11-20T14:49:19,055 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T14:49:19,055 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, ASSIGN in 339 msec 2024-11-20T14:49:19,056 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T14:49:19,057 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732114159056"}]},"ts":"1732114159056"} 2024-11-20T14:49:19,059 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-20T14:49:19,060 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T14:49:19,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 376 msec 2024-11-20T14:49:19,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:19,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:19,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:19,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:20,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:20,462 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:49:20,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,464 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,493 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:20,598 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-20T14:49:20,598 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-20T14:49:20,599 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-20T14:49:21,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:21,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:22,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:22,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:23,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:23,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:24,046 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T14:49:24,047 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-20T14:49:24,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:24,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:25,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:25,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:26,102 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:49:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,105 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,142 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:26,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:26,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:27,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:27,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:28,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:28,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:28,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35909 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-20T14:49:28,764 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-20T14:49:28,764 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-20T14:49:28,768 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-20T14:49:28,768 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 
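The repeated "Failed invocation" WARNs above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection while trying to recover the old WAL leases; because the underlying DFSClient has already been shut down, the probe throws IOException("Filesystem closed"), which surfaces wrapped in an InvocationTargetException and is retried about once per second. A minimal sketch of that probe, assuming a hypothetical helper name probeFileClosed (not the actual HBase method), looks roughly like this:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class LeaseProbeSketch {
  // Illustrative only: shows why a closed DFSClient shows up as InvocationTargetException.
  static boolean probeFileClosed(FileSystem fs, Path wal) {
    try {
      // Only DistributedFileSystem exposes isFileClosed, hence the reflective lookup.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return false; // not a DistributedFileSystem; caller falls back to plain recoverLease retries
    } catch (IllegalAccessException | InvocationTargetException e) {
      // IOException("Filesystem closed") from DFSClient.checkOpen arrives here wrapped
      // in InvocationTargetException -- the "Failed invocation" WARN in the log.
      return false;
    }
  }
}

The probe returning false simply means the lease is not yet confirmed closed, so the Close-WAL-Writer thread retries on its next pass, which is why the identical trace recurs at one-second intervals for both WAL files.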
2024-11-20T14:49:28,771 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=2] 2024-11-20T14:49:28,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:28,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:49:28,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/21783ae8911f4ae29279cc06005af3ae is 1080, key is row0001/info:/1732114168772/Put/seqid=0 2024-11-20T14:49:28,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741837_1013 (size=12509) 2024-11-20T14:49:28,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741837_1013 (size=12509) 2024-11-20T14:49:28,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/21783ae8911f4ae29279cc06005af3ae 2024-11-20T14:49:28,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/21783ae8911f4ae29279cc06005af3ae as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/21783ae8911f4ae29279cc06005af3ae 2024-11-20T14:49:28,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/21783ae8911f4ae29279cc06005af3ae, entries=7, sequenceid=11, filesize=12.2 K 2024-11-20T14:49:28,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for c651af5dc8a126e2f59db5888a2c5820 in 50ms, sequenceid=11, compaction requested=false 2024-11-20T14:49:28,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:28,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-20T14:49:28,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/97fd14e5f158401486b2489541333724 is 1080, key is row0008/info:/1732114168786/Put/seqid=0 2024-11-20T14:49:28,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741838_1014 (size=28684) 2024-11-20T14:49:28,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741838_1014 (size=28684) 2024-11-20T14:49:28,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/97fd14e5f158401486b2489541333724 2024-11-20T14:49:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/97fd14e5f158401486b2489541333724 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724 2024-11-20T14:49:28,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724, entries=22, sequenceid=36, filesize=28.0 K 2024-11-20T14:49:28,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for c651af5dc8a126e2f59db5888a2c5820 in 26ms, sequenceid=36, compaction requested=false 2024-11-20T14:49:28,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:28,862 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K 2024-11-20T14:49:28,862 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:28,862 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724 because midkey is the same as first or last row 2024-11-20T14:49:29,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:29,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:30,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:30,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:30,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:30,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:49:30,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/6d1419b6a810438880a13fed14b87ebb is 1080, key is row0030/info:/1732114168838/Put/seqid=0 2024-11-20T14:49:30,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741839_1015 (size=12509) 2024-11-20T14:49:30,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741839_1015 (size=12509) 2024-11-20T14:49:30,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/6d1419b6a810438880a13fed14b87ebb 2024-11-20T14:49:30,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/6d1419b6a810438880a13fed14b87ebb as 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/6d1419b6a810438880a13fed14b87ebb 2024-11-20T14:49:30,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/6d1419b6a810438880a13fed14b87ebb, entries=7, sequenceid=46, filesize=12.2 K 2024-11-20T14:49:30,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for c651af5dc8a126e2f59db5888a2c5820 in 28ms, sequenceid=46, compaction requested=true 2024-11-20T14:49:30,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:30,884 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-20T14:49:30,884 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:30,884 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724 because midkey is the same as first or last row 2024-11-20T14:49:30,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:30,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c651af5dc8a126e2f59db5888a2c5820:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:49:30,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:30,884 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:49:30,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-20T14:49:30,886 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:49:30,886 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): c651af5dc8a126e2f59db5888a2c5820/info is initiating minor compaction (all files) 2024-11-20T14:49:30,886 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c651af5dc8a126e2f59db5888a2c5820/info in TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 
2024-11-20T14:49:30,886 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/21783ae8911f4ae29279cc06005af3ae, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/6d1419b6a810438880a13fed14b87ebb] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp, totalSize=52.4 K 2024-11-20T14:49:30,887 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 21783ae8911f4ae29279cc06005af3ae, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732114168772 2024-11-20T14:49:30,888 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 97fd14e5f158401486b2489541333724, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1732114168786 2024-11-20T14:49:30,888 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6d1419b6a810438880a13fed14b87ebb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732114168838 2024-11-20T14:49:30,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/156b863395cd4f6bac9e8e3f6fe4a148 is 1080, key is row0037/info:/1732114170857/Put/seqid=0 2024-11-20T14:49:30,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741840_1016 (size=17894) 2024-11-20T14:49:30,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741840_1016 (size=17894) 2024-11-20T14:49:30,913 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/156b863395cd4f6bac9e8e3f6fe4a148 2024-11-20T14:49:30,916 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c651af5dc8a126e2f59db5888a2c5820#info#compaction#59 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:30,916 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/63eb379154c04d7ab43c4bf0674ce0d7 is 1080, key is row0001/info:/1732114168772/Put/seqid=0 2024-11-20T14:49:30,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/156b863395cd4f6bac9e8e3f6fe4a148 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/156b863395cd4f6bac9e8e3f6fe4a148 2024-11-20T14:49:30,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741841_1017 (size=43901) 2024-11-20T14:49:30,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741841_1017 (size=43901) 2024-11-20T14:49:30,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/156b863395cd4f6bac9e8e3f6fe4a148, entries=12, sequenceid=61, filesize=17.5 K 2024-11-20T14:49:30,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=16.81 KB/17216 for c651af5dc8a126e2f59db5888a2c5820 in 44ms, sequenceid=61, compaction requested=false 2024-11-20T14:49:30,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:30,928 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.9 K, sizeToCheck=16.0 K 2024-11-20T14:49:30,928 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:30,928 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724 because midkey is the same as first or last row 2024-11-20T14:49:30,931 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/63eb379154c04d7ab43c4bf0674ce0d7 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 2024-11-20T14:49:30,937 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c651af5dc8a126e2f59db5888a2c5820/info of c651af5dc8a126e2f59db5888a2c5820 into 63eb379154c04d7ab43c4bf0674ce0d7(size=42.9 K), total size for store is 60.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:49:30,937 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:30,937 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., storeName=c651af5dc8a126e2f59db5888a2c5820/info, priority=13, startTime=1732114170884; duration=0sec 2024-11-20T14:49:30,937 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-20T14:49:30,937 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 because midkey is the same as first or last row 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 because midkey is the same as first or last row 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 because midkey is the same as first or last row 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:30,938 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c651af5dc8a126e2f59db5888a2c5820:info 2024-11-20T14:49:31,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:31,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:32,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:32,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:32,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:32,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-20T14:49:32,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/3aef41c16a4a469bbfe15f1ff8565429 is 1080, key is row0049/info:/1732114170886/Put/seqid=0 2024-11-20T14:49:32,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741842_1018 (size=23299) 2024-11-20T14:49:32,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741842_1018 (size=23299) 2024-11-20T14:49:32,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/3aef41c16a4a469bbfe15f1ff8565429 2024-11-20T14:49:32,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/3aef41c16a4a469bbfe15f1ff8565429 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/3aef41c16a4a469bbfe15f1ff8565429 2024-11-20T14:49:32,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/3aef41c16a4a469bbfe15f1ff8565429, entries=17, sequenceid=82, filesize=22.8 K 2024-11-20T14:49:32,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=11.56 KB/11836 for c651af5dc8a126e2f59db5888a2c5820 in 34ms, sequenceid=82, compaction requested=true 2024-11-20T14:49:32,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:32,960 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-20T14:49:32,960 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:32,961 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 because midkey is the same as first or last row 2024-11-20T14:49:32,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c651af5dc8a126e2f59db5888a2c5820:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-20T14:49:32,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:32,961 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:49:32,962 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:49:32,962 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): c651af5dc8a126e2f59db5888a2c5820/info is initiating minor compaction (all files) 2024-11-20T14:49:32,962 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c651af5dc8a126e2f59db5888a2c5820/info in TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:32,962 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/156b863395cd4f6bac9e8e3f6fe4a148, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/3aef41c16a4a469bbfe15f1ff8565429] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp, totalSize=83.1 K 2024-11-20T14:49:32,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:32,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-20T14:49:32,963 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63eb379154c04d7ab43c4bf0674ce0d7, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732114168772 2024-11-20T14:49:32,964 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 156b863395cd4f6bac9e8e3f6fe4a148, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732114170857 2024-11-20T14:49:32,965 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3aef41c16a4a469bbfe15f1ff8565429, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732114170886 2024-11-20T14:49:32,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/c6cc9cd653194550be80dbfdb5e88e3e is 1080, key is 
row0066/info:/1732114172928/Put/seqid=0 2024-11-20T14:49:32,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741843_1019 (size=18987) 2024-11-20T14:49:32,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741843_1019 (size=18987) 2024-11-20T14:49:32,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/c6cc9cd653194550be80dbfdb5e88e3e 2024-11-20T14:49:32,985 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c651af5dc8a126e2f59db5888a2c5820#info#compaction#62 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:32,986 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/9c91593f3e8d4bdbaa1b288784e057a1 is 1080, key is row0001/info:/1732114168772/Put/seqid=0 2024-11-20T14:49:32,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/c6cc9cd653194550be80dbfdb5e88e3e as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/c6cc9cd653194550be80dbfdb5e88e3e 2024-11-20T14:49:32,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/c6cc9cd653194550be80dbfdb5e88e3e, entries=13, sequenceid=98, filesize=18.5 K 2024-11-20T14:49:32,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=8.41 KB/8608 for c651af5dc8a126e2f59db5888a2c5820 in 31ms, sequenceid=98, compaction requested=false 2024-11-20T14:49:32,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:32,994 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.6 K, sizeToCheck=16.0 K 2024-11-20T14:49:32,994 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:32,994 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 because midkey is the same as first or last row 2024-11-20T14:49:32,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741844_1020 
(size=75378) 2024-11-20T14:49:32,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741844_1020 (size=75378) 2024-11-20T14:49:32,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:32,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-20T14:49:33,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/2a74c3869df54ec7bc314877eb838713 is 1080, key is row0079/info:/1732114172964/Put/seqid=0 2024-11-20T14:49:33,003 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/9c91593f3e8d4bdbaa1b288784e057a1 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1 2024-11-20T14:49:33,011 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c651af5dc8a126e2f59db5888a2c5820/info of c651af5dc8a126e2f59db5888a2c5820 into 9c91593f3e8d4bdbaa1b288784e057a1(size=73.6 K), total size for store is 92.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
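The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines correspond to a size-ratio test over the candidate store files: no single file may dwarf the rest of the selection. A stripped-down version of that test is sketched below; filesInRatio is an illustrative name, and the 1.2 default ratio (hbase.hstore.compaction.ratio) is an assumption here rather than something stated in this log:

final class CompactionRatioSketch {
  static boolean filesInRatio(long[] fileSizes, double ratio /* e.g. 1.2 by default */) {
    if (fileSizes.length < 2) {
      return true;
    }
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      // Each candidate must be no larger than ratio times the sum of the others.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }
}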
2024-11-20T14:49:33,011 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:33,011 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., storeName=c651af5dc8a126e2f59db5888a2c5820/info, priority=13, startTime=1732114172961; duration=0sec 2024-11-20T14:49:33,011 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-20T14:49:33,012 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:33,012 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-20T14:49:33,012 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:33,012 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-11-20T14:49:33,012 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:33,015 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:33,015 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:33,015 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c651af5dc8a126e2f59db5888a2c5820:info 2024-11-20T14:49:33,016 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35909 {}] assignment.AssignmentManager(1363): Split request from 1a15ecfd95f4,38145,1732114157548, parent={ENCODED => c651af5dc8a126e2f59db5888a2c5820, NAME => 'TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-20T14:49:33,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741845_1021 (size=14663) 2024-11-20T14:49:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741845_1021 (size=14663) 2024-11-20T14:49:33,022 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35909 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:33,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=110 (bloomFilter=true), 
to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/2a74c3869df54ec7bc314877eb838713 2024-11-20T14:49:33,026 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35909 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c651af5dc8a126e2f59db5888a2c5820, daughterA=02d4c0e15b10c9c4bb345c18ac5578b0, daughterB=51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/2a74c3869df54ec7bc314877eb838713 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/2a74c3869df54ec7bc314877eb838713 2024-11-20T14:49:33,029 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c651af5dc8a126e2f59db5888a2c5820, daughterA=02d4c0e15b10c9c4bb345c18ac5578b0, daughterB=51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,029 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c651af5dc8a126e2f59db5888a2c5820, daughterA=02d4c0e15b10c9c4bb345c18ac5578b0, daughterB=51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,029 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c651af5dc8a126e2f59db5888a2c5820, daughterA=02d4c0e15b10c9c4bb345c18ac5578b0, daughterB=51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/2a74c3869df54ec7bc314877eb838713, entries=9, sequenceid=110, filesize=14.3 K 2024-11-20T14:49:33,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, UNASSIGN}] 2024-11-20T14:49:33,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=9.46 KB/9684 for c651af5dc8a126e2f59db5888a2c5820 in 42ms, sequenceid=110, compaction requested=true 2024-11-20T14:49:33,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c651af5dc8a126e2f59db5888a2c5820: 2024-11-20T14:49:33,038 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.5 K, sizeToCheck=16.0 K 2024-11-20T14:49:33,038 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
2024-11-20T14:49:33,038 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, UNASSIGN 2024-11-20T14:49:33,039 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.5 K, sizeToCheck=16.0 K 2024-11-20T14:49:33,039 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:33,039 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.5 K, sizeToCheck=16.0 K 2024-11-20T14:49:33,039 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-20T14:49:33,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-20T14:49:33,040 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35909 {}] assignment.AssignmentManager(1363): Split request from 1a15ecfd95f4,38145,1732114157548, parent={ENCODED => c651af5dc8a126e2f59db5888a2c5820, NAME => 'TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-20T14:49:33,040 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35909 {}] assignment.AssignmentManager(1378): Ignoring split request from 1a15ecfd95f4,38145,1732114157548, parent={ENCODED => c651af5dc8a126e2f59db5888a2c5820, NAME => 'TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-20T14:49:33,041 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c651af5dc8a126e2f59db5888a2c5820, regionState=CLOSING, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:33,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, UNASSIGN because future has completed 2024-11-20T14:49:33,044 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-20T14:49:33,044 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c651af5dc8a126e2f59db5888a2c5820, server=1a15ecfd95f4,38145,1732114157548}] 2024-11-20T14:49:33,202 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,202 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-20T14:49:33,202 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing c651af5dc8a126e2f59db5888a2c5820, disabling 
compactions & flushes 2024-11-20T14:49:33,202 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:33,203 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:33,203 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. after waiting 0 ms 2024-11-20T14:49:33,203 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:33,203 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing c651af5dc8a126e2f59db5888a2c5820 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-20T14:49:33,207 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/796892dbcc024cde99b2e125bde4db00 is 1080, key is row0088/info:/1732114172998/Put/seqid=0 2024-11-20T14:49:33,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741846_1022 (size=14663) 2024-11-20T14:49:33,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741846_1022 (size=14663) 2024-11-20T14:49:33,225 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/796892dbcc024cde99b2e125bde4db00 2024-11-20T14:49:33,230 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/.tmp/info/796892dbcc024cde99b2e125bde4db00 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/796892dbcc024cde99b2e125bde4db00 2024-11-20T14:49:33,236 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/796892dbcc024cde99b2e125bde4db00, entries=9, sequenceid=123, filesize=14.3 K 2024-11-20T14:49:33,237 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 
{event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for c651af5dc8a126e2f59db5888a2c5820 in 34ms, sequenceid=123, compaction requested=true 2024-11-20T14:49:33,238 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/21783ae8911f4ae29279cc06005af3ae, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/6d1419b6a810438880a13fed14b87ebb, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/156b863395cd4f6bac9e8e3f6fe4a148, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/3aef41c16a4a469bbfe15f1ff8565429] to archive 2024-11-20T14:49:33,239 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T14:49:33,240 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/21783ae8911f4ae29279cc06005af3ae to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/21783ae8911f4ae29279cc06005af3ae 2024-11-20T14:49:33,241 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/97fd14e5f158401486b2489541333724 2024-11-20T14:49:33,244 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/63eb379154c04d7ab43c4bf0674ce0d7 2024-11-20T14:49:33,245 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/6d1419b6a810438880a13fed14b87ebb to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/6d1419b6a810438880a13fed14b87ebb 2024-11-20T14:49:33,246 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/156b863395cd4f6bac9e8e3f6fe4a148 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/156b863395cd4f6bac9e8e3f6fe4a148 2024-11-20T14:49:33,248 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/3aef41c16a4a469bbfe15f1ff8565429 to 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/3aef41c16a4a469bbfe15f1ff8565429 2024-11-20T14:49:33,254 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-20T14:49:33,255 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 2024-11-20T14:49:33,255 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for c651af5dc8a126e2f59db5888a2c5820: Waiting for close lock at 1732114173202Running coprocessor pre-close hooks at 1732114173202Disabling compacts and flushes for region at 1732114173202Disabling writes for close at 1732114173203 (+1 ms)Obtaining lock to block concurrent updates at 1732114173203Preparing flush snapshotting stores in c651af5dc8a126e2f59db5888a2c5820 at 1732114173203Finished memstore snapshotting TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., syncing WAL and waiting on mvcc, flushsize=dataSize=9684, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732114173203Flushing stores of TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. at 1732114173204 (+1 ms)Flushing c651af5dc8a126e2f59db5888a2c5820/info: creating writer at 1732114173204Flushing c651af5dc8a126e2f59db5888a2c5820/info: appending metadata at 1732114173207 (+3 ms)Flushing c651af5dc8a126e2f59db5888a2c5820/info: closing flushed file at 1732114173207Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a8ba806: reopening flushed file at 1732114173229 (+22 ms)Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for c651af5dc8a126e2f59db5888a2c5820 in 34ms, sequenceid=123, compaction requested=true at 1732114173237 (+8 ms)Writing region close event to WAL at 1732114173250 (+13 ms)Running coprocessor post-close hooks at 1732114173255 (+5 ms)Closed at 1732114173255 2024-11-20T14:49:33,258 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,259 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c651af5dc8a126e2f59db5888a2c5820, regionState=CLOSED 2024-11-20T14:49:33,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c651af5dc8a126e2f59db5888a2c5820, server=1a15ecfd95f4,38145,1732114157548 because future has completed 2024-11-20T14:49:33,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-20T14:49:33,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure c651af5dc8a126e2f59db5888a2c5820, server=1a15ecfd95f4,38145,1732114157548 in 226 msec 2024-11-20T14:49:33,276 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-20T14:49:33,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c651af5dc8a126e2f59db5888a2c5820, UNASSIGN in 236 msec 2024-11-20T14:49:33,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:33,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:33,285 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:33,289 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=c651af5dc8a126e2f59db5888a2c5820, threads=4 2024-11-20T14:49:33,292 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/796892dbcc024cde99b2e125bde4db00 for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,292 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/2a74c3869df54ec7bc314877eb838713 for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,292 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1 for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,293 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/c6cc9cd653194550be80dbfdb5e88e3e for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,310 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/796892dbcc024cde99b2e125bde4db00, top=true 2024-11-20T14:49:33,316 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/c6cc9cd653194550be80dbfdb5e88e3e, top=true 2024-11-20T14:49:33,320 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/2a74c3869df54ec7bc314877eb838713, top=true 2024-11-20T14:49:33,335 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00 for child: 51da5e56255b7a27e76c1c57b1da4656, parent: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,336 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/796892dbcc024cde99b2e125bde4db00 for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,341 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e for child: 51da5e56255b7a27e76c1c57b1da4656, parent: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,341 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/c6cc9cd653194550be80dbfdb5e88e3e for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,345 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713 for child: 51da5e56255b7a27e76c1c57b1da4656, parent: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,345 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/2a74c3869df54ec7bc314877eb838713 for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741847_1023 (size=27) 2024-11-20T14:49:33,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741847_1023 (size=27) 2024-11-20T14:49:33,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741848_1024 (size=27) 2024-11-20T14:49:33,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741848_1024 (size=27) 2024-11-20T14:49:33,364 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1 for region: c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:49:33,366 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region c651af5dc8a126e2f59db5888a2c5820 Daughter A: [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820] storefiles, Daughter B: [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e] storefiles. 
2024-11-20T14:49:33,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741849_1025 (size=71) 2024-11-20T14:49:33,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741849_1025 (size=71) 2024-11-20T14:49:33,388 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:33,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741850_1026 (size=71) 2024-11-20T14:49:33,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741850_1026 (size=71) 2024-11-20T14:49:33,405 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:33,421 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-20T14:49:33,424 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-20T14:49:33,428 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732114173427"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732114173427"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732114173427"}]},"ts":"1732114173427"} 2024-11-20T14:49:33,428 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732114173427"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732114173427"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732114173427"}]},"ts":"1732114173427"} 2024-11-20T14:49:33,428 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732114173427"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732114173427"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732114173427"}]},"ts":"1732114173427"} 2024-11-20T14:49:33,454 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=02d4c0e15b10c9c4bb345c18ac5578b0, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=51da5e56255b7a27e76c1c57b1da4656, ASSIGN}] 2024-11-20T14:49:33,455 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51da5e56255b7a27e76c1c57b1da4656, ASSIGN 2024-11-20T14:49:33,455 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=02d4c0e15b10c9c4bb345c18ac5578b0, ASSIGN 2024-11-20T14:49:33,456 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51da5e56255b7a27e76c1c57b1da4656, ASSIGN; state=SPLITTING_NEW, location=1a15ecfd95f4,38145,1732114157548; forceNewPlan=false, retain=false 2024-11-20T14:49:33,456 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=02d4c0e15b10c9c4bb345c18ac5578b0, ASSIGN; state=SPLITTING_NEW, location=1a15ecfd95f4,38145,1732114157548; forceNewPlan=false, retain=false 2024-11-20T14:49:33,607 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=02d4c0e15b10c9c4bb345c18ac5578b0, regionState=OPENING, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:33,607 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=51da5e56255b7a27e76c1c57b1da4656, regionState=OPENING, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:33,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=02d4c0e15b10c9c4bb345c18ac5578b0, ASSIGN because future has completed 2024-11-20T14:49:33,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02d4c0e15b10c9c4bb345c18ac5578b0, server=1a15ecfd95f4,38145,1732114157548}] 2024-11-20T14:49:33,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51da5e56255b7a27e76c1c57b1da4656, ASSIGN because future has completed 2024-11-20T14:49:33,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548}] 2024-11-20T14:49:33,769 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 
2024-11-20T14:49:33,769 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 51da5e56255b7a27e76c1c57b1da4656, NAME => 'TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-20T14:49:33,769 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,769 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:33,769 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,769 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,771 INFO [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,771 INFO [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51da5e56255b7a27e76c1c57b1da4656 columnFamilyName info 2024-11-20T14:49:33,771 DEBUG [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:33,783 DEBUG [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820->hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1-top 2024-11-20T14:49:33,789 DEBUG [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713 2024-11-20T14:49:33,794 DEBUG [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00 2024-11-20T14:49:33,798 DEBUG [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e 2024-11-20T14:49:33,798 INFO [StoreOpener-51da5e56255b7a27e76c1c57b1da4656-1 {}] regionserver.HStore(327): Store=51da5e56255b7a27e76c1c57b1da4656/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:49:33,798 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,799 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,800 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,800 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,801 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,803 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,804 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 51da5e56255b7a27e76c1c57b1da4656; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737518, jitterRate=-0.062198057770729065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T14:49:33,804 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:33,805 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1006): Region open journal for 51da5e56255b7a27e76c1c57b1da4656: Running coprocessor pre-open hook at 1732114173769Writing region info on filesystem at 1732114173769Initializing all the Stores at 1732114173770 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114173770Cleaning up temporary data from old regions at 1732114173801 (+31 ms)Running coprocessor post-open hooks at 1732114173804 (+3 ms)Region opened successfully at 1732114173805 (+1 ms) 2024-11-20T14:49:33,806 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., pid=13, masterSystemTime=1732114173765 2024-11-20T14:49:33,806 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 51da5e56255b7a27e76c1c57b1da4656:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:49:33,806 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:33,806 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T14:49:33,808 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:49:33,808 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): 51da5e56255b7a27e76c1c57b1da4656/info is initiating minor compaction (all files) 2024-11-20T14:49:33,808 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51da5e56255b7a27e76c1c57b1da4656/info in TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 
2024-11-20T14:49:33,808 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820->hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1-top, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp, totalSize=120.8 K 2024-11-20T14:49:33,808 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:49:33,808 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:49:33,809 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 
2024-11-20T14:49:33,809 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732114168772 2024-11-20T14:49:33,809 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 02d4c0e15b10c9c4bb345c18ac5578b0, NAME => 'TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-20T14:49:33,809 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,809 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1732114172928 2024-11-20T14:49:33,809 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:49:33,809 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=51da5e56255b7a27e76c1c57b1da4656, regionState=OPEN, openSeqNum=127, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:33,809 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,809 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,809 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732114172964 2024-11-20T14:49:33,810 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732114172998 2024-11-20T14:49:33,810 INFO [StoreOpener-02d4c0e15b10c9c4bb345c18ac5578b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,811 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-20T14:49:33,811 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-20T14:49:33,812 INFO [StoreOpener-02d4c0e15b10c9c4bb345c18ac5578b0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02d4c0e15b10c9c4bb345c18ac5578b0 columnFamilyName info 2024-11-20T14:49:33,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-20T14:49:33,812 DEBUG [StoreOpener-02d4c0e15b10c9c4bb345c18ac5578b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:49:33,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548 because future has completed 2024-11-20T14:49:33,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-20T14:49:33,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548 in 200 msec 2024-11-20T14:49:33,819 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51da5e56255b7a27e76c1c57b1da4656, ASSIGN in 363 msec 2024-11-20T14:49:33,823 DEBUG [StoreOpener-02d4c0e15b10c9c4bb345c18ac5578b0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820->hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1-bottom 2024-11-20T14:49:33,823 INFO [StoreOpener-02d4c0e15b10c9c4bb345c18ac5578b0-1 {}] regionserver.HStore(327): Store=02d4c0e15b10c9c4bb345c18ac5578b0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:49:33,824 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,824 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,826 
DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,827 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,827 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,829 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,830 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 02d4c0e15b10c9c4bb345c18ac5578b0; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735739, jitterRate=-0.06445994973182678}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T14:49:33,830 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:49:33,831 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 02d4c0e15b10c9c4bb345c18ac5578b0: Running coprocessor pre-open hook at 1732114173809Writing region info on filesystem at 1732114173809Initializing all the Stores at 1732114173810 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114173810Cleaning up temporary data from old regions at 1732114173827 (+17 ms)Running coprocessor post-open hooks at 1732114173830 (+3 ms)Region opened successfully at 1732114173831 (+1 ms) 2024-11-20T14:49:33,832 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0., pid=12, masterSystemTime=1732114173765 2024-11-20T14:49:33,832 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 02d4c0e15b10c9c4bb345c18ac5578b0:info, priority=-2147483648, current under compaction store size is 2 2024-11-20T14:49:33,832 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T14:49:33,832 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-20T14:49:33,833 INFO 
[RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 2024-11-20T14:49:33,833 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.HStore(1541): 02d4c0e15b10c9c4bb345c18ac5578b0/info is initiating minor compaction (all files) 2024-11-20T14:49:33,833 INFO [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 02d4c0e15b10c9c4bb345c18ac5578b0/info in TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 2024-11-20T14:49:33,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/info/c0f959f8ce594bb7a1f5907e68c9fce5 is 193, key is TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656./info:regioninfo/1732114173809/Put/seqid=0 2024-11-20T14:49:33,833 INFO [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820->hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1-bottom] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/.tmp, totalSize=73.6 K 2024-11-20T14:49:33,834 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] compactions.Compactor(225): Compacting 9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732114168772 2024-11-20T14:49:33,834 DEBUG [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 2024-11-20T14:49:33,835 INFO [RS_OPEN_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 
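[Editor's illustrative sketch, not part of the captured log: the CompactionConfiguration record above reports minFilesToCompact:3 and maxFilesToCompact:10 for the daughter region's info store, and a minor compaction is queued immediately after the split. The snippet below shows, under the assumption of a standard HBase client on the classpath, how those two thresholds map to the usual site configuration keys and how a compaction of the test table could be requested explicitly through the Admin API; the table name is taken from the log, everything else is assumed.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // These mirror the "minFilesToCompact:3, maxFilesToCompact:10" values logged above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Request a major compaction of the table; the server schedules it asynchronously.
      admin.majorCompact(table);
      // Poll the current compaction state (NONE, MINOR, MAJOR, MAJOR_AND_MINOR).
      CompactionState state = admin.getCompactionState(table);
      System.out.println("compaction state: " + state);
    }
  }
}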
2024-11-20T14:49:33,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741851_1027 (size=9882) 2024-11-20T14:49:33,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741851_1027 (size=9882) 2024-11-20T14:49:33,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/info/c0f959f8ce594bb7a1f5907e68c9fce5 2024-11-20T14:49:33,840 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=02d4c0e15b10c9c4bb345c18ac5578b0, regionState=OPEN, openSeqNum=127, regionLocation=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:33,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02d4c0e15b10c9c4bb345c18ac5578b0, server=1a15ecfd95f4,38145,1732114157548 because future has completed 2024-11-20T14:49:33,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-20T14:49:33,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 02d4c0e15b10c9c4bb345c18ac5578b0, server=1a15ecfd95f4,38145,1732114157548 in 233 msec 2024-11-20T14:49:33,849 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51da5e56255b7a27e76c1c57b1da4656#info#compaction#66 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:33,850 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/0f3641592eb84fef8a64d8f619dffb04 is 1080, key is row0062/info:/1732114170917/Put/seqid=0 2024-11-20T14:49:33,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-20T14:49:33,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=02d4c0e15b10c9c4bb345c18ac5578b0, ASSIGN in 394 msec 2024-11-20T14:49:33,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c651af5dc8a126e2f59db5888a2c5820, daughterA=02d4c0e15b10c9c4bb345c18ac5578b0, daughterB=51da5e56255b7a27e76c1c57b1da4656 in 829 msec 2024-11-20T14:49:33,864 INFO [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 02d4c0e15b10c9c4bb345c18ac5578b0#info#compaction#67 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:33,864 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/.tmp/info/642fef8de76541eeab6d351bfa0e0552 is 1080, key is row0001/info:/1732114168772/Put/seqid=0 2024-11-20T14:49:33,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741852_1028 (size=43081) 2024-11-20T14:49:33,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741852_1028 (size=43081) 2024-11-20T14:49:33,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/ns/c964fa268fcd4d3f9ab225e780e11fe1 is 43, key is default/ns:d/1732114158631/Put/seqid=0 2024-11-20T14:49:33,875 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/0f3641592eb84fef8a64d8f619dffb04 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0f3641592eb84fef8a64d8f619dffb04 2024-11-20T14:49:33,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741853_1029 (size=70862) 2024-11-20T14:49:33,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741853_1029 (size=70862) 2024-11-20T14:49:33,884 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/.tmp/info/642fef8de76541eeab6d351bfa0e0552 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/info/642fef8de76541eeab6d351bfa0e0552 2024-11-20T14:49:33,887 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 51da5e56255b7a27e76c1c57b1da4656/info of 51da5e56255b7a27e76c1c57b1da4656 into 0f3641592eb84fef8a64d8f619dffb04(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
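[Editor's illustrative sketch, not part of the captured log: the MemStoreFlusher entries above show hbase:meta being flushed into info/ns/table HFiles and committed from .tmp. A flush of the same kind can also be requested explicitly through the Admin API. The table and column family names below are the ones that appear in the log (TestLogRolling-testLogRolling, family "info", rows like row0001); the connection setup and the qualifier/value are assumptions for illustration only.]

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(name)) {
      // Write a row shaped like the ones seen in the log (row0001/info:...).
      Put put = new Put(Bytes.toBytes("row0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
      // Ask the region server to write the memstore out as an HFile,
      // analogous to the meta flush recorded above.
      admin.flush(name);
    }
  }
}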
2024-11-20T14:49:33,887 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:33,887 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., storeName=51da5e56255b7a27e76c1c57b1da4656/info, priority=12, startTime=1732114173806; duration=0sec 2024-11-20T14:49:33,888 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:33,888 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51da5e56255b7a27e76c1c57b1da4656:info 2024-11-20T14:49:33,892 INFO [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 02d4c0e15b10c9c4bb345c18ac5578b0/info of 02d4c0e15b10c9c4bb345c18ac5578b0 into 642fef8de76541eeab6d351bfa0e0552(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:49:33,892 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 02d4c0e15b10c9c4bb345c18ac5578b0: 2024-11-20T14:49:33,893 INFO [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0., storeName=02d4c0e15b10c9c4bb345c18ac5578b0/info, priority=15, startTime=1732114173832; duration=0sec 2024-11-20T14:49:33,893 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:33,893 DEBUG [RS:0;1a15ecfd95f4:38145-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 02d4c0e15b10c9c4bb345c18ac5578b0:info 2024-11-20T14:49:33,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741854_1030 (size=5153) 2024-11-20T14:49:33,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741854_1030 (size=5153) 2024-11-20T14:49:33,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/ns/c964fa268fcd4d3f9ab225e780e11fe1 2024-11-20T14:49:33,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/table/743898ccb7114780a809cd71ec0ccaef is 65, key is TestLogRolling-testLogRolling/table:state/1732114159056/Put/seqid=0 2024-11-20T14:49:33,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741855_1031 (size=5340) 2024-11-20T14:49:33,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741855_1031 
(size=5340) 2024-11-20T14:49:33,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/table/743898ccb7114780a809cd71ec0ccaef 2024-11-20T14:49:33,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/info/c0f959f8ce594bb7a1f5907e68c9fce5 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/info/c0f959f8ce594bb7a1f5907e68c9fce5 2024-11-20T14:49:33,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/info/c0f959f8ce594bb7a1f5907e68c9fce5, entries=30, sequenceid=17, filesize=9.7 K 2024-11-20T14:49:33,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/ns/c964fa268fcd4d3f9ab225e780e11fe1 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/ns/c964fa268fcd4d3f9ab225e780e11fe1 2024-11-20T14:49:33,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/ns/c964fa268fcd4d3f9ab225e780e11fe1, entries=2, sequenceid=17, filesize=5.0 K 2024-11-20T14:49:33,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/table/743898ccb7114780a809cd71ec0ccaef as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/table/743898ccb7114780a809cd71ec0ccaef 2024-11-20T14:49:33,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/table/743898ccb7114780a809cd71ec0ccaef, entries=2, sequenceid=17, filesize=5.2 K 2024-11-20T14:49:33,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 160ms, sequenceid=17, compaction requested=false 2024-11-20T14:49:33,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T14:49:34,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:34,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43340 deadline: 1732114185023, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. is not online on 1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:35,052 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. is not online on 1a15ecfd95f4,38145,1732114157548 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T14:49:35,053 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820. 
is not online on 1a15ecfd95f4,38145,1732114157548 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T14:49:35,053 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732114158684.c651af5dc8a126e2f59db5888a2c5820., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=2 from cache 2024-11-20T14:49:35,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:35,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:36,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:36,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:37,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:37,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:38,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:38,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:38,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,797 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-20T14:49:38,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:38,845 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-20T14:49:39,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:39,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:40,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:40,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:41,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:41,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:42,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:42,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:43,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:43,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:44,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:44,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:45,151 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127] 2024-11-20T14:49:45,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:45,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:49:45,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/ffba24decc0441778aa240fcd33fe7f9 is 1080, key is row0097/info:/1732114185152/Put/seqid=0 2024-11-20T14:49:45,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741856_1032 (size=12516) 2024-11-20T14:49:45,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741856_1032 (size=12516) 2024-11-20T14:49:45,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/ffba24decc0441778aa240fcd33fe7f9 2024-11-20T14:49:45,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/ffba24decc0441778aa240fcd33fe7f9 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/ffba24decc0441778aa240fcd33fe7f9 2024-11-20T14:49:45,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/ffba24decc0441778aa240fcd33fe7f9, entries=7, sequenceid=137, filesize=12.2 K 2024-11-20T14:49:45,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for 51da5e56255b7a27e76c1c57b1da4656 in 24ms, sequenceid=137, compaction requested=false 2024-11-20T14:49:45,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:45,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:45,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-20T14:49:45,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/d8a593355d5f41a288e28932d98379cd is 1080, key is row0104/info:/1732114185165/Put/seqid=0 2024-11-20T14:49:45,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741857_1033 (size=16828) 2024-11-20T14:49:45,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741857_1033 (size=16828) 2024-11-20T14:49:45,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:45,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:45,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/d8a593355d5f41a288e28932d98379cd 2024-11-20T14:49:45,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/d8a593355d5f41a288e28932d98379cd as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/d8a593355d5f41a288e28932d98379cd 2024-11-20T14:49:45,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/d8a593355d5f41a288e28932d98379cd, entries=11, sequenceid=151, filesize=16.4 K 2024-11-20T14:49:45,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 51da5e56255b7a27e76c1c57b1da4656 in 429ms, sequenceid=151, compaction requested=true 2024-11-20T14:49:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51da5e56255b7a27e76c1c57b1da4656:info, priority=-2147483648, current under compaction store size is 1 
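[Editorial note] The flush entries above follow the usual write-then-commit pattern: the flusher writes the new HFile under the region's .tmp directory (HFileWriterImpl / DefaultStoreFlusher), then HRegionFileSystem "commits" it by moving it into the info store directory, after which the store reports the file as added. The sketch below is only an illustration of that pattern using the plain Hadoop FileSystem API; the paths and class name are hypothetical, and it is not the actual HStore/HRegionFileSystem implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: write the flushed file under .tmp, then rename it into the
// store directory so readers never observe a partially written HFile.
public class TmpThenCommitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical region layout, mirroring the .tmp/info split seen in the log.
        Path tmpFile = new Path("/data/default/ExampleTable/region/.tmp/flushed-hfile");
        Path storeFile = new Path("/data/default/ExampleTable/region/info/flushed-hfile");

        // 1. Write the flushed data into the temporary location.
        try (FSDataOutputStream out = fs.create(tmpFile)) {
            out.writeBytes("flushed cells would go here");
        }

        // 2. Commit: move the finished file into the store directory with a single rename.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + storeFile);
        }
        System.out.println("Committed " + storeFile);
    }
}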
2024-11-20T14:49:45,619 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:49:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:45,620 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 72425 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:49:45,621 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): 51da5e56255b7a27e76c1c57b1da4656/info is initiating minor compaction (all files) 2024-11-20T14:49:45,621 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51da5e56255b7a27e76c1c57b1da4656/info in TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:49:45,621 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0f3641592eb84fef8a64d8f619dffb04, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/ffba24decc0441778aa240fcd33fe7f9, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/d8a593355d5f41a288e28932d98379cd] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp, totalSize=70.7 K 2024-11-20T14:49:45,621 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f3641592eb84fef8a64d8f619dffb04, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732114170917 2024-11-20T14:49:45,621 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting ffba24decc0441778aa240fcd33fe7f9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732114185152 2024-11-20T14:49:45,622 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting d8a593355d5f41a288e28932d98379cd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732114185165 2024-11-20T14:49:45,635 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51da5e56255b7a27e76c1c57b1da4656#info#compaction#72 average throughput is 27.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:45,635 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/8e49ea87bdc342e9b10053dc64091b16 is 1080, key is row0062/info:/1732114170917/Put/seqid=0 2024-11-20T14:49:45,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741858_1034 (size=62655) 2024-11-20T14:49:45,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741858_1034 (size=62655) 2024-11-20T14:49:45,651 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/8e49ea87bdc342e9b10053dc64091b16 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/8e49ea87bdc342e9b10053dc64091b16 2024-11-20T14:49:45,657 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51da5e56255b7a27e76c1c57b1da4656/info of 51da5e56255b7a27e76c1c57b1da4656 into 8e49ea87bdc342e9b10053dc64091b16(size=61.2 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:49:45,657 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:45,657 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., storeName=51da5e56255b7a27e76c1c57b1da4656/info, priority=13, startTime=1732114185619; duration=0sec 2024-11-20T14:49:45,657 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:45,657 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51da5e56255b7a27e76c1c57b1da4656:info 2024-11-20T14:49:46,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:46,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:47,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:47,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-20T14:49:47,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/abfa847c6d004a8a994749fa48ec4c60 is 1080, key is row0115/info:/1732114185192/Put/seqid=0 2024-11-20T14:49:47,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741859_1035 (size=21156) 2024-11-20T14:49:47,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741859_1035 (size=21156) 2024-11-20T14:49:47,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/abfa847c6d004a8a994749fa48ec4c60 2024-11-20T14:49:47,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/abfa847c6d004a8a994749fa48ec4c60 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/abfa847c6d004a8a994749fa48ec4c60 2024-11-20T14:49:47,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/abfa847c6d004a8a994749fa48ec4c60, entries=15, sequenceid=170, filesize=20.7 K 2024-11-20T14:49:47,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 51da5e56255b7a27e76c1c57b1da4656 in 23ms, sequenceid=170, compaction requested=false 2024-11-20T14:49:47,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:47,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:47,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 
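[Editorial note] The WARN blocks that repeat roughly once per second come from RecoverLeaseFSUtils retrying lease recovery on WAL files after the test's HDFS client has already been shut down. Because isFileClosed is invoked via reflection, the underlying "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException whose own message is null, which is exactly the "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" shape in the log. The sketch below reproduces only that wrapping behaviour; the ClosedFs class is a stand-in, not the real DistributedFileSystem.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Minimal sketch of the reflective call pattern behind the repeated warnings above.
public class ReflectiveInvocationSketch {

    static class ClosedFs {
        public boolean isFileClosed(String path) throws java.io.IOException {
            // Mimics DFSClient.checkOpen() after the client has been closed.
            throw new java.io.IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedFs fs = new ClosedFs();
        Method isFileClosed = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(fs, "hdfs://localhost/WALs/example.wal");
        } catch (InvocationTargetException e) {
            // The wrapper has no message of its own; the real failure is the cause,
            // which is what each retry in the log keeps reporting.
            System.out.println("wrapper message: " + e.getMessage()); // prints: null
            System.out.println("cause: " + e.getCause());             // IOException: Filesystem closed
        }
    }
}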
2024-11-20T14:49:47,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/565d0f9ba2034b38adae0d38a4f39a85 is 1080, key is row0130/info:/1732114187222/Put/seqid=0 2024-11-20T14:49:47,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741860_1036 (size=16828) 2024-11-20T14:49:47,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741860_1036 (size=16828) 2024-11-20T14:49:47,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/565d0f9ba2034b38adae0d38a4f39a85 2024-11-20T14:49:47,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/565d0f9ba2034b38adae0d38a4f39a85 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/565d0f9ba2034b38adae0d38a4f39a85 2024-11-20T14:49:47,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/565d0f9ba2034b38adae0d38a4f39a85, entries=11, sequenceid=184, filesize=16.4 K 2024-11-20T14:49:47,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 51da5e56255b7a27e76c1c57b1da4656 in 40ms, sequenceid=184, compaction requested=true 2024-11-20T14:49:47,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:47,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:47,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51da5e56255b7a27e76c1c57b1da4656:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:49:47,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:47,286 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:49:47,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-20T14:49:47,287 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100639 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-20T14:49:47,287 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): 51da5e56255b7a27e76c1c57b1da4656/info is initiating minor compaction (all files) 2024-11-20T14:49:47,287 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51da5e56255b7a27e76c1c57b1da4656/info in TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:49:47,288 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/8e49ea87bdc342e9b10053dc64091b16, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/abfa847c6d004a8a994749fa48ec4c60, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/565d0f9ba2034b38adae0d38a4f39a85] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp, totalSize=98.3 K 2024-11-20T14:49:47,288 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e49ea87bdc342e9b10053dc64091b16, keycount=53, bloomtype=ROW, size=61.2 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732114170917 2024-11-20T14:49:47,289 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting abfa847c6d004a8a994749fa48ec4c60, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732114185192 2024-11-20T14:49:47,290 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 565d0f9ba2034b38adae0d38a4f39a85, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732114187222 2024-11-20T14:49:47,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:47,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:49:47,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/3feb6c2a8bde4aa58ee9dda2e225817f is 1080, key is row0141/info:/1732114187246/Put/seqid=0 2024-11-20T14:49:47,333 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51da5e56255b7a27e76c1c57b1da4656#info#compaction#76 average throughput is 40.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:47,334 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/4e1d573e6e7f4497a8a86604c88e0b61 is 1080, key is row0062/info:/1732114170917/Put/seqid=0 2024-11-20T14:49:47,335 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T14:49:47,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741861_1037 (size=16828) 2024-11-20T14:49:47,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741861_1037 (size=16828) 2024-11-20T14:49:47,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/3feb6c2a8bde4aa58ee9dda2e225817f 2024-11-20T14:49:47,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/3feb6c2a8bde4aa58ee9dda2e225817f as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/3feb6c2a8bde4aa58ee9dda2e225817f 2024-11-20T14:49:47,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/3feb6c2a8bde4aa58ee9dda2e225817f, entries=11, sequenceid=198, filesize=16.4 K 2024-11-20T14:49:47,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 51da5e56255b7a27e76c1c57b1da4656 in 68ms, sequenceid=198, compaction requested=false 2024-11-20T14:49:47,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:47,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741862_1038 (size=90862) 2024-11-20T14:49:47,362 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741862_1038 (size=90862) 2024-11-20T14:49:47,368 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/4e1d573e6e7f4497a8a86604c88e0b61 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/4e1d573e6e7f4497a8a86604c88e0b61 2024-11-20T14:49:47,385 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51da5e56255b7a27e76c1c57b1da4656/info of 51da5e56255b7a27e76c1c57b1da4656 into 4e1d573e6e7f4497a8a86604c88e0b61(size=88.7 K), total size for store is 105.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:49:47,385 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:47,385 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., storeName=51da5e56255b7a27e76c1c57b1da4656/info, priority=13, startTime=1732114187286; duration=0sec 2024-11-20T14:49:47,385 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:47,385 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51da5e56255b7a27e76c1c57b1da4656:info 2024-11-20T14:49:48,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:48,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:49,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:49,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:49,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:49,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-20T14:49:49,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/5d7e583c98c74802952a46fce9e37d8c is 1080, key is row0152/info:/1732114187287/Put/seqid=0 2024-11-20T14:49:49,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741863_1039 (size=15750) 2024-11-20T14:49:49,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741863_1039 (size=15750) 2024-11-20T14:49:49,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/5d7e583c98c74802952a46fce9e37d8c 2024-11-20T14:49:49,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/5d7e583c98c74802952a46fce9e37d8c as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/5d7e583c98c74802952a46fce9e37d8c 2024-11-20T14:49:49,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/5d7e583c98c74802952a46fce9e37d8c, entries=10, sequenceid=212, filesize=15.4 K 2024-11-20T14:49:49,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for 51da5e56255b7a27e76c1c57b1da4656 in 27ms, sequenceid=212, compaction requested=true 2024-11-20T14:49:49,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:49,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51da5e56255b7a27e76c1c57b1da4656:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:49:49,336 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:49,336 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:49:49,338 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123440 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:49:49,338 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): 51da5e56255b7a27e76c1c57b1da4656/info is initiating minor compaction (all files) 2024-11-20T14:49:49,338 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51da5e56255b7a27e76c1c57b1da4656/info in TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:49:49,338 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/4e1d573e6e7f4497a8a86604c88e0b61, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/3feb6c2a8bde4aa58ee9dda2e225817f, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/5d7e583c98c74802952a46fce9e37d8c] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp, totalSize=120.5 K 2024-11-20T14:49:49,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:49,338 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4e1d573e6e7f4497a8a86604c88e0b61, keycount=79, bloomtype=ROW, size=88.7 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732114170917 2024-11-20T14:49:49,338 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-20T14:49:49,339 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3feb6c2a8bde4aa58ee9dda2e225817f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732114187246 2024-11-20T14:49:49,339 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5d7e583c98c74802952a46fce9e37d8c, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732114187287 2024-11-20T14:49:49,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/a82de50542d84d309500d0a84e002fde is 1080, key is row0162/info:/1732114189310/Put/seqid=0 2024-11-20T14:49:49,347 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741864_1040 (size=19000) 2024-11-20T14:49:49,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741864_1040 (size=19000) 2024-11-20T14:49:49,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=228 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/a82de50542d84d309500d0a84e002fde 2024-11-20T14:49:49,352 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51da5e56255b7a27e76c1c57b1da4656#info#compaction#79 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:49,353 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/0feab4008a0246f29f3df06c63eb6392 is 1080, key is row0062/info:/1732114170917/Put/seqid=0 2024-11-20T14:49:49,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/a82de50542d84d309500d0a84e002fde as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/a82de50542d84d309500d0a84e002fde 2024-11-20T14:49:49,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741865_1041 (size=113606) 2024-11-20T14:49:49,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741865_1041 (size=113606) 2024-11-20T14:49:49,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/a82de50542d84d309500d0a84e002fde, entries=13, sequenceid=228, filesize=18.6 K 2024-11-20T14:49:49,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=8.41 KB/8608 for 51da5e56255b7a27e76c1c57b1da4656 in 24ms, sequenceid=228, compaction requested=false 2024-11-20T14:49:49,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:49,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:49,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-20T14:49:49,365 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/0feab4008a0246f29f3df06c63eb6392 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0feab4008a0246f29f3df06c63eb6392 2024-11-20T14:49:49,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/9e2d24b2f59a419788ba12744adc4541 is 1080, key is row0175/info:/1732114189340/Put/seqid=0 2024-11-20T14:49:49,373 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51da5e56255b7a27e76c1c57b1da4656/info of 51da5e56255b7a27e76c1c57b1da4656 into 0feab4008a0246f29f3df06c63eb6392(size=110.9 K), total size for store is 129.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:49:49,373 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:49,373 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., storeName=51da5e56255b7a27e76c1c57b1da4656/info, priority=13, startTime=1732114189336; duration=0sec 2024-11-20T14:49:49,374 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:49,374 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51da5e56255b7a27e76c1c57b1da4656:info 2024-11-20T14:49:49,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741866_1042 (size=14672) 2024-11-20T14:49:49,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741866_1042 (size=14672) 2024-11-20T14:49:49,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/9e2d24b2f59a419788ba12744adc4541 2024-11-20T14:49:49,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/9e2d24b2f59a419788ba12744adc4541 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9e2d24b2f59a419788ba12744adc4541 2024-11-20T14:49:49,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9e2d24b2f59a419788ba12744adc4541, entries=9, sequenceid=240, filesize=14.3 K 2024-11-20T14:49:49,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=9.46 KB/9684 for 51da5e56255b7a27e76c1c57b1da4656 in 27ms, sequenceid=240, compaction requested=true 2024-11-20T14:49:49,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:49,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51da5e56255b7a27e76c1c57b1da4656:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:49:49,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:49,391 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:49:49,392 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 147278 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T14:49:49,392 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): 51da5e56255b7a27e76c1c57b1da4656/info is initiating minor compaction (all files) 2024-11-20T14:49:49,392 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51da5e56255b7a27e76c1c57b1da4656/info in TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 
2024-11-20T14:49:49,393 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0feab4008a0246f29f3df06c63eb6392, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/a82de50542d84d309500d0a84e002fde, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9e2d24b2f59a419788ba12744adc4541] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp, totalSize=143.8 K 2024-11-20T14:49:49,393 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0feab4008a0246f29f3df06c63eb6392, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732114170917 2024-11-20T14:49:49,393 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting a82de50542d84d309500d0a84e002fde, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1732114189310 2024-11-20T14:49:49,394 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e2d24b2f59a419788ba12744adc4541, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732114189340 2024-11-20T14:49:49,405 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51da5e56255b7a27e76c1c57b1da4656#info#compaction#81 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:49:49,406 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/c5eba35257564196ae23c82f3f2cf32e is 1080, key is row0062/info:/1732114170917/Put/seqid=0 2024-11-20T14:49:49,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741867_1043 (size=137560) 2024-11-20T14:49:49,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741867_1043 (size=137560) 2024-11-20T14:49:49,414 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/c5eba35257564196ae23c82f3f2cf32e as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/c5eba35257564196ae23c82f3f2cf32e 2024-11-20T14:49:49,421 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51da5e56255b7a27e76c1c57b1da4656/info of 51da5e56255b7a27e76c1c57b1da4656 into c5eba35257564196ae23c82f3f2cf32e(size=134.3 K), total size for store is 134.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:49:49,421 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:49,421 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., storeName=51da5e56255b7a27e76c1c57b1da4656/info, priority=13, startTime=1732114189391; duration=0sec 2024-11-20T14:49:49,421 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:49:49,421 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51da5e56255b7a27e76c1c57b1da4656:info 2024-11-20T14:49:50,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:50,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:51,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:51,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:49:51,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-20T14:49:51,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/cd99021c9b4c40038494e33525b692e9 is 1080, key is row0184/info:/1732114189365/Put/seqid=0 2024-11-20T14:49:51,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741868_1044 (size=15750) 2024-11-20T14:49:51,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741868_1044 (size=15750) 2024-11-20T14:49:51,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-20T14:49:51,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43340 deadline: 1732114201430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548 2024-11-20T14:49:51,431 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T14:49:51,431 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-20T14:49:51,431 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127 because the exception is null or not the one we care about 2024-11-20T14:49:51,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/cd99021c9b4c40038494e33525b692e9 2024-11-20T14:49:51,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/cd99021c9b4c40038494e33525b692e9 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/cd99021c9b4c40038494e33525b692e9 2024-11-20T14:49:51,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/cd99021c9b4c40038494e33525b692e9, entries=10, sequenceid=255, filesize=15.4 K 2024-11-20T14:49:51,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=19.96 KB/20444 for 51da5e56255b7a27e76c1c57b1da4656 in 428ms, sequenceid=255, compaction requested=false 2024-11-20T14:49:51,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:49:52,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:52,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:53,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:53,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:54,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:54,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:55,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:55,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:56,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:56,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:57,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:57,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:58,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:49:58,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T14:49:58,687 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-20T14:49:58,687 INFO [master/1a15ecfd95f4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-20T14:49:59,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T14:49:59,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:00,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:00,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:01,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:01,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-20T14:50:01,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656
2024-11-20T14:50:01,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB
2024-11-20T14:50:01,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/619a7077317c4025a0c2dd17a95cc492 is 1080, key is row0194/info:/1732114191391/Put/seqid=0
2024-11-20T14:50:01,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741869_1045 (size=26570)
2024-11-20T14:50:01,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741869_1045 (size=26570)
2024-11-20T14:50:01,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/619a7077317c4025a0c2dd17a95cc492
2024-11-20T14:50:01,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/619a7077317c4025a0c2dd17a95cc492 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/619a7077317c4025a0c2dd17a95cc492
2024-11-20T14:50:01,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-20T14:50:01,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43340 deadline: 1732114211498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548
2024-11-20T14:50:01,499 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-20T14:50:01,499 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51da5e56255b7a27e76c1c57b1da4656, server=1a15ecfd95f4,38145,1732114157548
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-20T14:50:01,499 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., hostname=1a15ecfd95f4,38145,1732114157548, seqNum=127 because the exception is null or not the one we care about
2024-11-20T14:50:01,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/619a7077317c4025a0c2dd17a95cc492, entries=20, sequenceid=278, filesize=25.9 K
2024-11-20T14:50:01,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 51da5e56255b7a27e76c1c57b1da4656 in 28ms, sequenceid=278, compaction requested=true
2024-11-20T14:50:01,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656:
2024-11-20T14:50:01,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51da5e56255b7a27e76c1c57b1da4656:info, priority=-2147483648, current under compaction store size is 1
2024-11-20T14:50:01,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T14:50:01,505 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T14:50:01,506 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 179880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T14:50:01,506 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): 51da5e56255b7a27e76c1c57b1da4656/info is initiating minor compaction (all files)
2024-11-20T14:50:01,506 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51da5e56255b7a27e76c1c57b1da4656/info in TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.
2024-11-20T14:50:01,506 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/c5eba35257564196ae23c82f3f2cf32e, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/cd99021c9b4c40038494e33525b692e9, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/619a7077317c4025a0c2dd17a95cc492] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp, totalSize=175.7 K
2024-11-20T14:50:01,506 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5eba35257564196ae23c82f3f2cf32e, keycount=122, bloomtype=ROW, size=134.3 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732114170917
2024-11-20T14:50:01,507 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd99021c9b4c40038494e33525b692e9, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732114189365
2024-11-20T14:50:01,507 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 619a7077317c4025a0c2dd17a95cc492, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732114191391
2024-11-20T14:50:01,519 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51da5e56255b7a27e76c1c57b1da4656#info#compaction#84 average throughput is 51.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T14:50:01,519 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/04011181eeac4e75bde1e28d09ffce08 is 1080, key is row0062/info:/1732114170917/Put/seqid=0
2024-11-20T14:50:01,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741870_1046 (size=170095)
2024-11-20T14:50:01,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741870_1046 (size=170095)
2024-11-20T14:50:01,534 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/04011181eeac4e75bde1e28d09ffce08 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/04011181eeac4e75bde1e28d09ffce08
2024-11-20T14:50:01,539 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51da5e56255b7a27e76c1c57b1da4656/info of 51da5e56255b7a27e76c1c57b1da4656 into 04011181eeac4e75bde1e28d09ffce08(size=166.1 K), total size for store is 166.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T14:50:01,540 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51da5e56255b7a27e76c1c57b1da4656:
2024-11-20T14:50:01,540 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., storeName=51da5e56255b7a27e76c1c57b1da4656/info, priority=13, startTime=1732114201505; duration=0sec
2024-11-20T14:50:01,540 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T14:50:01,540 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51da5e56255b7a27e76c1c57b1da4656:info
2024-11-20T14:50:02,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:02,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:03,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:03,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:03,544 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-20T14:50:04,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:04,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:05,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:05,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:06,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:06,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:07,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:07,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:08,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:08,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:09,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:09,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:10,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:10,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:11,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:11,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:11,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:50:11,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-20T14:50:11,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/705a5924ffe14351b84fd877641ff9d3 is 1080, key is row0214/info:/1732114201478/Put/seqid=0 2024-11-20T14:50:11,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741871_1047 (size=15760) 2024-11-20T14:50:11,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741871_1047 (size=15760) 2024-11-20T14:50:11,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/705a5924ffe14351b84fd877641ff9d3 2024-11-20T14:50:11,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/705a5924ffe14351b84fd877641ff9d3 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/705a5924ffe14351b84fd877641ff9d3 2024-11-20T14:50:11,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/705a5924ffe14351b84fd877641ff9d3, entries=10, sequenceid=292, filesize=15.4 K 2024-11-20T14:50:11,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=1.05 KB/1076 for 51da5e56255b7a27e76c1c57b1da4656 in 34ms, sequenceid=292, compaction requested=false 2024-11-20T14:50:11,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:50:12,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:12,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:13,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:13,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:50:13,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:50:13,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-20T14:50:13,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/92d483ae98de418f986f2c5323c9d6b5 is 1080, key is row0224/info:/1732114211547/Put/seqid=0 2024-11-20T14:50:13,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741872_1048 (size=12523) 2024-11-20T14:50:13,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741872_1048 (size=12523) 2024-11-20T14:50:13,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=302 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/92d483ae98de418f986f2c5323c9d6b5 2024-11-20T14:50:13,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/92d483ae98de418f986f2c5323c9d6b5 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/92d483ae98de418f986f2c5323c9d6b5 2024-11-20T14:50:13,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/92d483ae98de418f986f2c5323c9d6b5, entries=7, sequenceid=302, filesize=12.2 K 2024-11-20T14:50:13,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 51da5e56255b7a27e76c1c57b1da4656 in 24ms, sequenceid=302, compaction requested=true 2024-11-20T14:50:13,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:50:13,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51da5e56255b7a27e76c1c57b1da4656:info, priority=-2147483648, current under compaction store size is 1 2024-11-20T14:50:13,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:50:13,583 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T14:50:13,584 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 198378 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-20T14:50:13,584 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1541): 51da5e56255b7a27e76c1c57b1da4656/info is initiating minor compaction (all files) 2024-11-20T14:50:13,584 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51da5e56255b7a27e76c1c57b1da4656/info in TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:50:13,584 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/04011181eeac4e75bde1e28d09ffce08, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/705a5924ffe14351b84fd877641ff9d3, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/92d483ae98de418f986f2c5323c9d6b5] into tmpdir=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp, totalSize=193.7 K 2024-11-20T14:50:13,585 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04011181eeac4e75bde1e28d09ffce08, keycount=152, bloomtype=ROW, size=166.1 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732114170917 2024-11-20T14:50:13,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38145 {}] regionserver.HRegion(8855): Flush requested on 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:50:13,585 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 705a5924ffe14351b84fd877641ff9d3, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732114201478 2024-11-20T14:50:13,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-20T14:50:13,585 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] compactions.Compactor(225): Compacting 92d483ae98de418f986f2c5323c9d6b5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=302, earliestPutTs=1732114211547 2024-11-20T14:50:13,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/2905974e829c4824b2d6924cc2d40526 is 1080, key is row0231/info:/1732114213560/Put/seqid=0 2024-11-20T14:50:13,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741873_1049 (size=17918) 2024-11-20T14:50:13,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741873_1049 (size=17918) 2024-11-20T14:50:13,612 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51da5e56255b7a27e76c1c57b1da4656#info#compaction#88 average throughput is 43.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T14:50:13,612 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/85aeee4ad8cc497abaf0713c0d52a5d1 is 1080, key is row0062/info:/1732114170917/Put/seqid=0 2024-11-20T14:50:13,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741874_1050 (size=188548) 2024-11-20T14:50:13,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741874_1050 (size=188548) 2024-11-20T14:50:13,624 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/85aeee4ad8cc497abaf0713c0d52a5d1 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/85aeee4ad8cc497abaf0713c0d52a5d1 2024-11-20T14:50:13,631 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51da5e56255b7a27e76c1c57b1da4656/info of 51da5e56255b7a27e76c1c57b1da4656 into 85aeee4ad8cc497abaf0713c0d52a5d1(size=184.1 K), total size for store is 184.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T14:50:13,631 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:50:13,631 INFO [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., storeName=51da5e56255b7a27e76c1c57b1da4656/info, priority=13, startTime=1732114213583; duration=0sec 2024-11-20T14:50:13,631 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T14:50:13,631 DEBUG [RS:0;1a15ecfd95f4:38145-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51da5e56255b7a27e76c1c57b1da4656:info 2024-11-20T14:50:14,010 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/2905974e829c4824b2d6924cc2d40526 2024-11-20T14:50:14,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/2905974e829c4824b2d6924cc2d40526 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/2905974e829c4824b2d6924cc2d40526 2024-11-20T14:50:14,023 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/2905974e829c4824b2d6924cc2d40526, entries=12, sequenceid=317, filesize=17.5 K 2024-11-20T14:50:14,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 51da5e56255b7a27e76c1c57b1da4656 in 439ms, sequenceid=317, compaction requested=false 2024-11-20T14:50:14,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:50:14,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:14,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:15,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:15,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-20T14:50:15,613 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-20T14:50:15,613 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38145%2C1732114157548.1732114215613 2024-11-20T14:50:15,622 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,622 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,622 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,622 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,622 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,622 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114158187 with entries=312, filesize=308.51 KB; new WAL /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114215613 2024-11-20T14:50:15,624 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33067:33067),(127.0.0.1/127.0.0.1:35095:35095)] 2024-11-20T14:50:15,624 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114158187 is not closed yet, will try archiving it next time 2024-11-20T14:50:15,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741833_1009 (size=315921) 2024-11-20T14:50:15,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741833_1009 (size=315921) 2024-11-20T14:50:15,629 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 51da5e56255b7a27e76c1c57b1da4656 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-20T14:50:15,634 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/96db8d02fd7c472e8123fec1d402f7f9 is 1080, key is row0243/info:/1732114213586/Put/seqid=0 2024-11-20T14:50:15,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741876_1052 (size=20092) 2024-11-20T14:50:15,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741876_1052 (size=20092) 2024-11-20T14:50:15,639 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/96db8d02fd7c472e8123fec1d402f7f9 2024-11-20T14:50:15,644 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/.tmp/info/96db8d02fd7c472e8123fec1d402f7f9 as 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/96db8d02fd7c472e8123fec1d402f7f9 2024-11-20T14:50:15,649 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/96db8d02fd7c472e8123fec1d402f7f9, entries=14, sequenceid=335, filesize=19.6 K 2024-11-20T14:50:15,650 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 51da5e56255b7a27e76c1c57b1da4656 in 21ms, sequenceid=335, compaction requested=true 2024-11-20T14:50:15,650 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 51da5e56255b7a27e76c1c57b1da4656: 2024-11-20T14:50:15,651 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-20T14:50:15,654 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/info/a803bb2e080741c1b940fbb1177afbb2 is 186, key is TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0./info:regioninfo/1732114173840/Put/seqid=0 2024-11-20T14:50:15,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741877_1053 (size=6153) 2024-11-20T14:50:15,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741877_1053 (size=6153) 2024-11-20T14:50:15,659 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/info/a803bb2e080741c1b940fbb1177afbb2 2024-11-20T14:50:15,664 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/.tmp/info/a803bb2e080741c1b940fbb1177afbb2 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/info/a803bb2e080741c1b940fbb1177afbb2 2024-11-20T14:50:15,670 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/info/a803bb2e080741c1b940fbb1177afbb2, entries=5, sequenceid=21, filesize=6.0 K 2024-11-20T14:50:15,671 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-20T14:50:15,671 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-20T14:50:15,671 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 02d4c0e15b10c9c4bb345c18ac5578b0: 2024-11-20T14:50:15,671 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C38145%2C1732114157548.1732114215671 2024-11-20T14:50:15,687 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,688 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,688 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:15,688 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114215613 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114215671 2024-11-20T14:50:15,689 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35095:35095),(127.0.0.1/127.0.0.1:33067:33067)] 2024-11-20T14:50:15,689 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114215613 is not closed yet, will try archiving it next time 2024-11-20T14:50:15,689 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114158187 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/oldWALs/1a15ecfd95f4%2C38145%2C1732114157548.1732114158187 2024-11-20T14:50:15,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741875_1051 (size=731) 2024-11-20T14:50:15,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741875_1051 (size=731) 2024-11-20T14:50:15,690 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-20T14:50:15,690 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/WALs/1a15ecfd95f4,38145,1732114157548/1a15ecfd95f4%2C38145%2C1732114157548.1732114215613 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/oldWALs/1a15ecfd95f4%2C38145%2C1732114157548.1732114215613 2024-11-20T14:50:15,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T14:50:15,790 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
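Just before the shutdown begins above, the Time-limited test thread flushes the remaining memstore data and rolls the region server's WAL twice, after which the old WAL files are archived to oldWALs. A minimal sketch of driving the same operations through the public HBase client Admin API; nothing here is the test's own code, the table name is taken from the log and the surrounding wiring is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndRollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Force pending memstore edits out to HFiles, as the MemStoreFlusher entries above do internally.
      admin.flush(table);
      // Roll the write-ahead log on each region server; once every edit in the old
      // WAL has been flushed, the file becomes eligible for archival to oldWALs.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}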
2024-11-20T14:50:15,791 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:50:15,791 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:15,791 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:15,791 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T14:50:15,791 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1179018422, stopped=false 2024-11-20T14:50:15,791 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T14:50:15,791 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a15ecfd95f4,35909,1732114157364 2024-11-20T14:50:15,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:50:15,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:50:15,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:15,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:15,838 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:50:15,838 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:50:15,838 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:50:15,838 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:15,839 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,38145,1732114157548' ***** 2024-11-20T14:50:15,839 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:50:15,839 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:50:15,839 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:50:15,839 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:50:15,839 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:50:15,839 INFO [RS:0;1a15ecfd95f4:38145 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T14:50:15,839 INFO [RS:0;1a15ecfd95f4:38145 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
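The shutdown sequence above is what HBaseTestingUtil.shutdownMiniCluster() produces when AbstractTestLogRolling.tearDown() runs, as the call stack shows: the async connection is closed, the master is asked to shut down, and the region server receives its STOPPED notice. A rough JUnit sketch of that lifecycle; only shutdownMiniCluster() is confirmed by the stack trace, startMiniCluster() is assumed to mirror the older HBaseTestingUtility API.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // One-process HBase + HDFS + ZooKeeper cluster, as used by the log-rolling tests.
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(); // assumed; mirrors the older HBaseTestingUtility API
  }

  @After
  public void tearDown() throws Exception {
    // The call visible in the stack trace above: closes the client connection,
    // requests cluster shutdown, then stops the region servers and daemon threads.
    util.shutdownMiniCluster();
  }
}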
2024-11-20T14:50:15,839 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(3091): Received CLOSE for 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(3091): Received CLOSE for 02d4c0e15b10c9c4bb345c18ac5578b0 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,38145,1732114157548 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 51da5e56255b7a27e76c1c57b1da4656, disabling compactions & flushes 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a15ecfd95f4:38145. 2024-11-20T14:50:15,840 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:50:15,840 DEBUG [RS:0;1a15ecfd95f4:38145 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:50:15,840 DEBUG [RS:0;1a15ecfd95f4:38145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. after waiting 0 ms 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 
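The repeated Close-WAL-Writer-0 warnings earlier in this section come from lease-recovery probes against the WAL files of the previous mini cluster (port 44451); they fail with "Filesystem closed" because that cluster's DFSClient is already gone. A minimal sketch of such a probe using the public DistributedFileSystem API; the path and retry policy are placeholders and this is not HBase's actual RecoverLeaseFSUtils logic.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseProbeSketch {
  // Ask the NameNode to recover the lease on a WAL and poll until HDFS reports the
  // file closed. If the owning DFSClient has already been shut down, the probe fails
  // with IOException("Filesystem closed"), which is the failure logged above.
  static boolean waitForFileClosed(DistributedFileSystem dfs, Path wal,
                                   int attempts, long sleepMs) throws Exception {
    boolean recovered = dfs.recoverLease(wal);
    for (int i = 0; !recovered && i < attempts; i++) {
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(sleepMs);
      recovered = dfs.recoverLease(wal);
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path("hdfs://localhost:44451/example/WALs/example.wal"); // placeholder path
    try (FileSystem fs = FileSystem.get(wal.toUri(), conf)) {
      if (fs instanceof DistributedFileSystem) {
        System.out.println("closed=" + waitForFileClosed((DistributedFileSystem) fs, wal, 10, 1000L));
      }
    }
  }
}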
2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T14:50:15,840 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-20T14:50:15,840 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1325): Online Regions={51da5e56255b7a27e76c1c57b1da4656=TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656., 1588230740=hbase:meta,,1.1588230740, 02d4c0e15b10c9c4bb345c18ac5578b0=TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.} 2024-11-20T14:50:15,840 DEBUG [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1351): Waiting on 02d4c0e15b10c9c4bb345c18ac5578b0, 1588230740, 51da5e56255b7a27e76c1c57b1da4656 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:50:15,840 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:50:15,840 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:50:15,840 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820->hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1-top, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0f3641592eb84fef8a64d8f619dffb04, 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/ffba24decc0441778aa240fcd33fe7f9, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/8e49ea87bdc342e9b10053dc64091b16, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/d8a593355d5f41a288e28932d98379cd, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/abfa847c6d004a8a994749fa48ec4c60, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/4e1d573e6e7f4497a8a86604c88e0b61, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/565d0f9ba2034b38adae0d38a4f39a85, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/3feb6c2a8bde4aa58ee9dda2e225817f, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0feab4008a0246f29f3df06c63eb6392, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/5d7e583c98c74802952a46fce9e37d8c, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/a82de50542d84d309500d0a84e002fde, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/c5eba35257564196ae23c82f3f2cf32e, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9e2d24b2f59a419788ba12744adc4541, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/cd99021c9b4c40038494e33525b692e9, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/04011181eeac4e75bde1e28d09ffce08, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/619a7077317c4025a0c2dd17a95cc492, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/705a5924ffe14351b84fd877641ff9d3, 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/92d483ae98de418f986f2c5323c9d6b5] to archive 2024-11-20T14:50:15,842 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T14:50:15,843 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:50:15,844 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-c6cc9cd653194550be80dbfdb5e88e3e 2024-11-20T14:50:15,845 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-20T14:50:15,846 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-2a74c3869df54ec7bc314877eb838713 2024-11-20T14:50:15,846 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:50:15,846 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:50:15,846 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114215840Running coprocessor pre-close hooks at 1732114215840Disabling compacts and flushes for region at 1732114215840Disabling writes for close at 
1732114215840Writing region close event to WAL at 1732114215842 (+2 ms)Running coprocessor post-close hooks at 1732114215846 (+4 ms)Closed at 1732114215846 2024-11-20T14:50:15,846 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T14:50:15,847 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0f3641592eb84fef8a64d8f619dffb04 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0f3641592eb84fef8a64d8f619dffb04 2024-11-20T14:50:15,848 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/TestLogRolling-testLogRolling=c651af5dc8a126e2f59db5888a2c5820-796892dbcc024cde99b2e125bde4db00 2024-11-20T14:50:15,849 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/ffba24decc0441778aa240fcd33fe7f9 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/ffba24decc0441778aa240fcd33fe7f9 2024-11-20T14:50:15,850 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/8e49ea87bdc342e9b10053dc64091b16 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/8e49ea87bdc342e9b10053dc64091b16 2024-11-20T14:50:15,851 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/d8a593355d5f41a288e28932d98379cd to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/d8a593355d5f41a288e28932d98379cd 2024-11-20T14:50:15,852 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/abfa847c6d004a8a994749fa48ec4c60 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/abfa847c6d004a8a994749fa48ec4c60 2024-11-20T14:50:15,853 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/4e1d573e6e7f4497a8a86604c88e0b61 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/4e1d573e6e7f4497a8a86604c88e0b61 2024-11-20T14:50:15,855 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/565d0f9ba2034b38adae0d38a4f39a85 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/565d0f9ba2034b38adae0d38a4f39a85 2024-11-20T14:50:15,856 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/3feb6c2a8bde4aa58ee9dda2e225817f to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/3feb6c2a8bde4aa58ee9dda2e225817f 2024-11-20T14:50:15,857 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0feab4008a0246f29f3df06c63eb6392 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/0feab4008a0246f29f3df06c63eb6392 2024-11-20T14:50:15,858 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/5d7e583c98c74802952a46fce9e37d8c to 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/5d7e583c98c74802952a46fce9e37d8c 2024-11-20T14:50:15,860 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/a82de50542d84d309500d0a84e002fde to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/a82de50542d84d309500d0a84e002fde 2024-11-20T14:50:15,861 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/c5eba35257564196ae23c82f3f2cf32e to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/c5eba35257564196ae23c82f3f2cf32e 2024-11-20T14:50:15,862 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9e2d24b2f59a419788ba12744adc4541 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/9e2d24b2f59a419788ba12744adc4541 2024-11-20T14:50:15,863 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/cd99021c9b4c40038494e33525b692e9 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/cd99021c9b4c40038494e33525b692e9 2024-11-20T14:50:15,864 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/04011181eeac4e75bde1e28d09ffce08 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/04011181eeac4e75bde1e28d09ffce08 2024-11-20T14:50:15,865 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/619a7077317c4025a0c2dd17a95cc492 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/619a7077317c4025a0c2dd17a95cc492 2024-11-20T14:50:15,866 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/705a5924ffe14351b84fd877641ff9d3 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/705a5924ffe14351b84fd877641ff9d3 2024-11-20T14:50:15,867 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/92d483ae98de418f986f2c5323c9d6b5 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/info/92d483ae98de418f986f2c5323c9d6b5 2024-11-20T14:50:15,868 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a15ecfd95f4:35909 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-20T14:50:15,868 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0f3641592eb84fef8a64d8f619dffb04=43081, ffba24decc0441778aa240fcd33fe7f9=12516, 8e49ea87bdc342e9b10053dc64091b16=62655, d8a593355d5f41a288e28932d98379cd=16828, abfa847c6d004a8a994749fa48ec4c60=21156, 4e1d573e6e7f4497a8a86604c88e0b61=90862, 565d0f9ba2034b38adae0d38a4f39a85=16828, 3feb6c2a8bde4aa58ee9dda2e225817f=16828, 0feab4008a0246f29f3df06c63eb6392=113606, 5d7e583c98c74802952a46fce9e37d8c=15750, a82de50542d84d309500d0a84e002fde=19000, c5eba35257564196ae23c82f3f2cf32e=137560, 9e2d24b2f59a419788ba12744adc4541=14672, cd99021c9b4c40038494e33525b692e9=15750, 04011181eeac4e75bde1e28d09ffce08=170095, 619a7077317c4025a0c2dd17a95cc492=26570, 705a5924ffe14351b84fd877641ff9d3=15760, 92d483ae98de418f986f2c5323c9d6b5=12523] 2024-11-20T14:50:15,877 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/51da5e56255b7a27e76c1c57b1da4656/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=126 2024-11-20T14:50:15,878 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:50:15,878 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 51da5e56255b7a27e76c1c57b1da4656: Waiting for close lock at 1732114215840Running coprocessor pre-close hooks at 1732114215840Disabling compacts and flushes for region at 1732114215840Disabling writes for close at 1732114215840Writing region close event to WAL at 1732114215869 (+29 ms)Running coprocessor post-close hooks at 1732114215878 (+9 ms)Closed at 1732114215878 2024-11-20T14:50:15,878 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732114173022.51da5e56255b7a27e76c1c57b1da4656. 2024-11-20T14:50:15,878 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 02d4c0e15b10c9c4bb345c18ac5578b0, disabling compactions & flushes 2024-11-20T14:50:15,878 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 2024-11-20T14:50:15,878 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 2024-11-20T14:50:15,878 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. after waiting 0 ms 2024-11-20T14:50:15,878 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 
2024-11-20T14:50:15,879 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820->hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/c651af5dc8a126e2f59db5888a2c5820/info/9c91593f3e8d4bdbaa1b288784e057a1-bottom] to archive 2024-11-20T14:50:15,880 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T14:50:15,881 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820 to hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/archive/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/info/9c91593f3e8d4bdbaa1b288784e057a1.c651af5dc8a126e2f59db5888a2c5820 2024-11-20T14:50:15,881 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-20T14:50:15,884 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/data/default/TestLogRolling-testLogRolling/02d4c0e15b10c9c4bb345c18ac5578b0/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-20T14:50:15,885 INFO [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 2024-11-20T14:50:15,885 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 02d4c0e15b10c9c4bb345c18ac5578b0: Waiting for close lock at 1732114215878Running coprocessor pre-close hooks at 1732114215878Disabling compacts and flushes for region at 1732114215878Disabling writes for close at 1732114215878Writing region close event to WAL at 1732114215881 (+3 ms)Running coprocessor post-close hooks at 1732114215885 (+4 ms)Closed at 1732114215885 2024-11-20T14:50:15,885 DEBUG [RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732114173022.02d4c0e15b10c9c4bb345c18ac5578b0. 2024-11-20T14:50:16,040 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,38145,1732114157548; all regions closed. 
2024-11-20T14:50:16,041 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,041 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,041 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,041 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,042 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741834_1010 (size=8107) 2024-11-20T14:50:16,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741834_1010 (size=8107) 2024-11-20T14:50:16,047 DEBUG [RS:0;1a15ecfd95f4:38145 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/oldWALs 2024-11-20T14:50:16,047 INFO [RS:0;1a15ecfd95f4:38145 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C38145%2C1732114157548.meta:.meta(num 1732114158533) 2024-11-20T14:50:16,048 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,048 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,048 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,048 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,048 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741878_1054 (size=780) 2024-11-20T14:50:16,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741878_1054 (size=780) 2024-11-20T14:50:16,053 DEBUG [RS:0;1a15ecfd95f4:38145 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/oldWALs 2024-11-20T14:50:16,053 INFO [regionserver/1a15ecfd95f4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:50:16,053 INFO [RS:0;1a15ecfd95f4:38145 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C38145%2C1732114157548:(num 1732114215671) 2024-11-20T14:50:16,053 DEBUG [RS:0;1a15ecfd95f4:38145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:16,053 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:50:16,053 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:50:16,053 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T14:50:16,053 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:50:16,053 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:50:16,053 INFO [RS:0;1a15ecfd95f4:38145 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38145 2024-11-20T14:50:16,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:50:16,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,38145,1732114157548 2024-11-20T14:50:16,087 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:50:16,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,38145,1732114157548] 2024-11-20T14:50:16,104 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,38145,1732114157548 already deleted, retry=false 2024-11-20T14:50:16,104 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,38145,1732114157548 expired; onlineServers=0 2024-11-20T14:50:16,104 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a15ecfd95f4,35909,1732114157364' ***** 2024-11-20T14:50:16,104 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T14:50:16,105 INFO [M:0;1a15ecfd95f4:35909 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:50:16,105 INFO [M:0;1a15ecfd95f4:35909 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:50:16,105 DEBUG [M:0;1a15ecfd95f4:35909 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T14:50:16,105 DEBUG [M:0;1a15ecfd95f4:35909 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T14:50:16,105 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T14:50:16,105 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114157898 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114157898,5,FailOnTimeoutGroup] 2024-11-20T14:50:16,105 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114157898 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114157898,5,FailOnTimeoutGroup] 2024-11-20T14:50:16,105 INFO [M:0;1a15ecfd95f4:35909 {}] hbase.ChoreService(370): Chore service for: master/1a15ecfd95f4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T14:50:16,105 INFO [M:0;1a15ecfd95f4:35909 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:50:16,105 DEBUG [M:0;1a15ecfd95f4:35909 {}] master.HMaster(1795): Stopping service threads 2024-11-20T14:50:16,105 INFO [M:0;1a15ecfd95f4:35909 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T14:50:16,105 INFO [M:0;1a15ecfd95f4:35909 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:50:16,106 INFO [M:0;1a15ecfd95f4:35909 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T14:50:16,106 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T14:50:16,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T14:50:16,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:16,113 DEBUG [M:0;1a15ecfd95f4:35909 {}] zookeeper.ZKUtil(347): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T14:50:16,113 WARN [M:0;1a15ecfd95f4:35909 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T14:50:16,114 INFO [M:0;1a15ecfd95f4:35909 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/.lastflushedseqids 2024-11-20T14:50:16,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741879_1055 (size=228) 2024-11-20T14:50:16,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741879_1055 (size=228) 2024-11-20T14:50:16,127 INFO [M:0;1a15ecfd95f4:35909 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T14:50:16,127 INFO [M:0;1a15ecfd95f4:35909 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T14:50:16,127 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:50:16,127 INFO [M:0;1a15ecfd95f4:35909 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:16,127 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:16,127 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:50:16,127 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:16,128 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-20T14:50:16,146 DEBUG [M:0;1a15ecfd95f4:35909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd53e092cd194e08b063d94c0e1b6bea is 82, key is hbase:meta,,1/info:regioninfo/1732114158562/Put/seqid=0 2024-11-20T14:50:16,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741880_1056 (size=5672) 2024-11-20T14:50:16,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741880_1056 (size=5672) 2024-11-20T14:50:16,159 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd53e092cd194e08b063d94c0e1b6bea 2024-11-20T14:50:16,180 DEBUG [M:0;1a15ecfd95f4:35909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7523bdcbb9ec4e108a56ccd94d0ea17d is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732114159062/Put/seqid=0 2024-11-20T14:50:16,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741881_1057 (size=7091) 2024-11-20T14:50:16,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741881_1057 (size=7091) 2024-11-20T14:50:16,185 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7523bdcbb9ec4e108a56ccd94d0ea17d 2024-11-20T14:50:16,189 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7523bdcbb9ec4e108a56ccd94d0ea17d 2024-11-20T14:50:16,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:16,196 INFO [RS:0;1a15ecfd95f4:38145 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:50:16,196 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:38145-0x1015a0221df0001, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:16,196 INFO [RS:0;1a15ecfd95f4:38145 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,38145,1732114157548; zookeeper connection closed. 2024-11-20T14:50:16,196 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@73fad587 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@73fad587 2024-11-20T14:50:16,197 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T14:50:16,204 DEBUG [M:0;1a15ecfd95f4:35909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ad4ee42d6b949639d223d1205020086 is 69, key is 1a15ecfd95f4,38145,1732114157548/rs:state/1732114158030/Put/seqid=0 2024-11-20T14:50:16,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741882_1058 (size=5156) 2024-11-20T14:50:16,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741882_1058 (size=5156) 2024-11-20T14:50:16,210 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ad4ee42d6b949639d223d1205020086 2024-11-20T14:50:16,230 DEBUG [M:0;1a15ecfd95f4:35909 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aee730410117425ba7c66dc09e321db3 is 52, key is load_balancer_on/state:d/1732114158679/Put/seqid=0 2024-11-20T14:50:16,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741883_1059 (size=5056) 2024-11-20T14:50:16,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741883_1059 (size=5056) 2024-11-20T14:50:16,235 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aee730410117425ba7c66dc09e321db3 2024-11-20T14:50:16,240 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd53e092cd194e08b063d94c0e1b6bea as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dd53e092cd194e08b063d94c0e1b6bea 2024-11-20T14:50:16,244 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dd53e092cd194e08b063d94c0e1b6bea, entries=8, sequenceid=125, filesize=5.5 K 2024-11-20T14:50:16,245 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7523bdcbb9ec4e108a56ccd94d0ea17d as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7523bdcbb9ec4e108a56ccd94d0ea17d 2024-11-20T14:50:16,249 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7523bdcbb9ec4e108a56ccd94d0ea17d 2024-11-20T14:50:16,249 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7523bdcbb9ec4e108a56ccd94d0ea17d, entries=13, sequenceid=125, filesize=6.9 K 2024-11-20T14:50:16,250 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ad4ee42d6b949639d223d1205020086 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ad4ee42d6b949639d223d1205020086 2024-11-20T14:50:16,255 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ad4ee42d6b949639d223d1205020086, entries=1, sequenceid=125, filesize=5.0 K 2024-11-20T14:50:16,256 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aee730410117425ba7c66dc09e321db3 as hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aee730410117425ba7c66dc09e321db3 2024-11-20T14:50:16,260 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34623/user/jenkins/test-data/be6ba3b4-ea17-0a6b-fb43-24a7263f384a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aee730410117425ba7c66dc09e321db3, entries=1, sequenceid=125, filesize=4.9 K 2024-11-20T14:50:16,261 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=125, compaction requested=false 2024-11-20T14:50:16,263 INFO [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T14:50:16,263 DEBUG [M:0;1a15ecfd95f4:35909 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114216127Disabling compacts and flushes for region at 1732114216127Disabling writes for close at 1732114216127Obtaining lock to block concurrent updates at 1732114216128 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732114216128Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732114216128Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732114216129 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732114216129Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732114216145 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732114216146 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732114216163 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732114216179 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732114216179Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732114216189 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732114216203 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732114216203Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732114216214 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732114216230 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732114216230Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60bffe52: reopening flushed file at 1732114216239 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20a9ec38: reopening flushed file at 1732114216244 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@470b686: reopening flushed file at 1732114216250 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64d6c6ae: reopening flushed file at 1732114216255 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=125, compaction requested=false at 1732114216261 (+6 ms)Writing region close event to WAL at 1732114216262 (+1 ms)Closed at 1732114216262 2024-11-20T14:50:16,263 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,263 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,263 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:16,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741830_1006 (size=61332) 2024-11-20T14:50:16,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36291 is added to blk_1073741830_1006 (size=61332) 2024-11-20T14:50:16,266 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:50:16,266 INFO [M:0;1a15ecfd95f4:35909 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T14:50:16,266 INFO [M:0;1a15ecfd95f4:35909 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35909 2024-11-20T14:50:16,266 INFO [M:0;1a15ecfd95f4:35909 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:50:16,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:16,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:16,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:16,396 INFO [M:0;1a15ecfd95f4:35909 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:50:16,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35909-0x1015a0221df0000, quorum=127.0.0.1:49307, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:16,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76d165ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:50:16,400 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3ed34521{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:50:16,400 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:50:16,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7dd0dd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:50:16,400 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73b2b928{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir/,STOPPED} 2024-11-20T14:50:16,403 WARN [BP-190161143-172.17.0.2-1732114155508 heartbeating to 
localhost/127.0.0.1:34623 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:50:16,403 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:50:16,403 WARN [BP-190161143-172.17.0.2-1732114155508 heartbeating to localhost/127.0.0.1:34623 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-190161143-172.17.0.2-1732114155508 (Datanode Uuid ec2ef208-900f-42d5-b516-fc86d7ab2a11) service to localhost/127.0.0.1:34623 2024-11-20T14:50:16,403 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:50:16,404 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data3/current/BP-190161143-172.17.0.2-1732114155508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:16,404 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data4/current/BP-190161143-172.17.0.2-1732114155508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:16,404 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:50:16,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@303a5a71{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:50:16,407 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a5f8367{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:50:16,407 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:50:16,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3480767c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:50:16,407 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bbefcfb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir/,STOPPED} 2024-11-20T14:50:16,410 WARN [BP-190161143-172.17.0.2-1732114155508 heartbeating to localhost/127.0.0.1:34623 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:50:16,410 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:50:16,410 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:50:16,410 WARN [BP-190161143-172.17.0.2-1732114155508 heartbeating to localhost/127.0.0.1:34623 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-190161143-172.17.0.2-1732114155508 (Datanode Uuid 1ef7c392-9054-4371-a123-e78abbe43054) service to localhost/127.0.0.1:34623 2024-11-20T14:50:16,410 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data1/current/BP-190161143-172.17.0.2-1732114155508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:16,411 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/cluster_46e4ca4e-fb16-3101-5b51-f67f78e3abef/data/data2/current/BP-190161143-172.17.0.2-1732114155508 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:16,411 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:50:16,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c994a01{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:50:16,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19e3a83f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:50:16,417 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:50:16,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7319bc41{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:50:16,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f3cebf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir/,STOPPED} 2024-11-20T14:50:16,424 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T14:50:16,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T14:50:16,469 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=228 (was 205) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:34623 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34623 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34623 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34623 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34623 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34623 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34623 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34623 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34623 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=506 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 195) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9158 (was 9380) 2024-11-20T14:50:16,478 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=228, OpenFileDescriptor=506, MaxFileDescriptor=1048576, SystemLoadAverage=214, ProcessCount=11, AvailableMemoryMB=9158 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.log.dir so I do NOT create it in target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa277912-fc12-20ce-eda0-e734f32e6d0f/hadoop.tmp.dir so I do NOT create it in target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe, deleteOnExit=true 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/test.cache.data in system properties and HBase conf 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.log.dir in system properties and HBase conf 2024-11-20T14:50:16,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-20T14:50:16,480 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T14:50:16,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:50:16,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T14:50:16,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/nfs.dump.dir in system properties and HBase conf 2024-11-20T14:50:16,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/java.io.tmpdir in system properties and HBase conf 2024-11-20T14:50:16,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T14:50:16,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T14:50:16,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T14:50:16,494 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:50:16,753 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:50:16,757 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:50:16,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:50:16,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:50:16,773 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:50:16,773 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:50:16,774 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@280c54a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:50:16,774 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d15ac40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:50:16,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3264995b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/java.io.tmpdir/jetty-localhost-41671-hadoop-hdfs-3_4_1-tests_jar-_-any-4888515446185143008/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:50:16,894 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ea1ad77{HTTP/1.1, (http/1.1)}{localhost:41671} 2024-11-20T14:50:16,894 INFO [Time-limited test {}] server.Server(415): Started @302093ms 2024-11-20T14:50:16,907 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-20T14:50:17,126 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:50:17,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:50:17,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:50:17,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:50:17,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T14:50:17,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@255c5192{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:50:17,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19b99f6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:50:17,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fb8d998{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/java.io.tmpdir/jetty-localhost-40289-hadoop-hdfs-3_4_1-tests_jar-_-any-16512767016206123050/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:50:17,241 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4fe8057e{HTTP/1.1, (http/1.1)}{localhost:40289} 2024-11-20T14:50:17,241 INFO [Time-limited test {}] server.Server(415): Started @302440ms 2024-11-20T14:50:17,242 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T14:50:17,266 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T14:50:17,269 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T14:50:17,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T14:50:17,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T14:50:17,270 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T14:50:17,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7460995c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.log.dir/,AVAILABLE} 2024-11-20T14:50:17,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@395250f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T14:50:17,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-20T14:50:17,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:17,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4030687e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/java.io.tmpdir/jetty-localhost-43365-hadoop-hdfs-3_4_1-tests_jar-_-any-17077946865846829603/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:50:17,372 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11685cbc{HTTP/1.1, (http/1.1)}{localhost:43365} 2024-11-20T14:50:17,372 INFO [Time-limited test {}] server.Server(415): Started @302571ms 2024-11-20T14:50:17,373 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
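[Editor's note] The RecoverLeaseFSUtils(258) warnings above report an InvocationTargetException whose cause is "Filesystem closed": the utility appears to probe DistributedFileSystem#isFileClosed reflectively, so the real IOException surfaces as the wrapped cause. Below is a minimal, illustrative sketch of that reflective-probe pattern; the class and method names here are hypothetical and are not HBase's actual RecoverLeaseFSUtils code, only FileSystem, Path, and Method#invoke (as seen in the stack trace) are assumed.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical helper: reflectively ask the filesystem whether a file is closed. */
final class IsFileClosedProbe {

  /**
   * Returns true if the filesystem reports the file as closed; returns false when the
   * method is missing or the call fails (e.g. "java.io.IOException: Filesystem closed",
   * which arrives wrapped in an InvocationTargetException, as in the log above).
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // Some FileSystem implementations do not expose isFileClosed, hence the reflective lookup.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // API not available or not accessible on this FileSystem implementation
    } catch (InvocationTargetException e) {
      // The underlying failure is e.getCause(); here we just treat it as "not confirmed closed".
      return false;
    }
  }

  private IsFileClosedProbe() {}
}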
2024-11-20T14:50:17,971 WARN [Thread-2484 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data1/current/BP-2050894618-172.17.0.2-1732114216497/current, will proceed with Du for space computation calculation, 2024-11-20T14:50:17,972 WARN [Thread-2485 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data2/current/BP-2050894618-172.17.0.2-1732114216497/current, will proceed with Du for space computation calculation, 2024-11-20T14:50:17,987 WARN [Thread-2449 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:50:17,989 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x393c97a7492bcebe with lease ID 0xd6ae2d10225dd60e: Processing first storage report for DS-c5ee27b5-ddb4-4fe4-9b1c-165797aca3b8 from datanode DatanodeRegistration(127.0.0.1:43765, datanodeUuid=f781ca05-fa3d-4dcb-b438-9650e67bdf98, infoPort=37565, infoSecurePort=0, ipcPort=41899, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497) 2024-11-20T14:50:17,989 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x393c97a7492bcebe with lease ID 0xd6ae2d10225dd60e: from storage DS-c5ee27b5-ddb4-4fe4-9b1c-165797aca3b8 node DatanodeRegistration(127.0.0.1:43765, datanodeUuid=f781ca05-fa3d-4dcb-b438-9650e67bdf98, infoPort=37565, infoSecurePort=0, ipcPort=41899, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:50:17,989 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x393c97a7492bcebe with lease ID 0xd6ae2d10225dd60e: Processing first storage report for DS-40c3a379-80a5-4ee8-a089-102e7680e91b from datanode DatanodeRegistration(127.0.0.1:43765, datanodeUuid=f781ca05-fa3d-4dcb-b438-9650e67bdf98, infoPort=37565, infoSecurePort=0, ipcPort=41899, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497) 2024-11-20T14:50:17,989 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x393c97a7492bcebe with lease ID 0xd6ae2d10225dd60e: from storage DS-40c3a379-80a5-4ee8-a089-102e7680e91b node DatanodeRegistration(127.0.0.1:43765, datanodeUuid=f781ca05-fa3d-4dcb-b438-9650e67bdf98, infoPort=37565, infoSecurePort=0, ipcPort=41899, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:50:18,197 WARN [Thread-2496 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data3/current/BP-2050894618-172.17.0.2-1732114216497/current, will proceed with Du for space computation calculation, 2024-11-20T14:50:18,197 WARN [Thread-2497 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data4/current/BP-2050894618-172.17.0.2-1732114216497/current, will proceed with Du for space computation calculation, 2024-11-20T14:50:18,217 WARN [Thread-2472 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T14:50:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6cb5ba99e40bbea with lease ID 0xd6ae2d10225dd60f: Processing first storage report for DS-a621882d-0e1e-40d7-9b0e-5c9eecb3111f from datanode DatanodeRegistration(127.0.0.1:46457, datanodeUuid=5d2cbc10-2843-4d33-8ae6-a17394b88c1b, infoPort=35977, infoSecurePort=0, ipcPort=45649, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497) 2024-11-20T14:50:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6cb5ba99e40bbea with lease ID 0xd6ae2d10225dd60f: from storage DS-a621882d-0e1e-40d7-9b0e-5c9eecb3111f node DatanodeRegistration(127.0.0.1:46457, datanodeUuid=5d2cbc10-2843-4d33-8ae6-a17394b88c1b, infoPort=35977, infoSecurePort=0, ipcPort=45649, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:50:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6cb5ba99e40bbea with lease ID 0xd6ae2d10225dd60f: Processing first storage report for DS-de43f31c-4c09-4a0b-a683-bfc169298fe8 from datanode DatanodeRegistration(127.0.0.1:46457, datanodeUuid=5d2cbc10-2843-4d33-8ae6-a17394b88c1b, infoPort=35977, infoSecurePort=0, ipcPort=45649, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497) 2024-11-20T14:50:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6cb5ba99e40bbea with lease ID 0xd6ae2d10225dd60f: from storage DS-de43f31c-4c09-4a0b-a683-bfc169298fe8 node DatanodeRegistration(127.0.0.1:46457, datanodeUuid=5d2cbc10-2843-4d33-8ae6-a17394b88c1b, infoPort=35977, infoSecurePort=0, ipcPort=45649, storageInfo=lv=-57;cid=testClusterID;nsid=588918639;c=1732114216497), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T14:50:18,302 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47 2024-11-20T14:50:18,305 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/zookeeper_0, clientPort=62617, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T14:50:18,306 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62617 2024-11-20T14:50:18,306 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:18,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:18,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:18,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:18,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:50:18,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741825_1001 (size=7) 2024-11-20T14:50:18,318 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd with version=8 2024-11-20T14:50:18,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40825/user/jenkins/test-data/9ca27062-563b-e5f6-7ec6-982ce7ad3cfe/hbase-staging 2024-11-20T14:50:18,320 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:50:18,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:50:18,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:50:18,320 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:50:18,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:50:18,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:50:18,320 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-20T14:50:18,320 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:50:18,321 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41805 2024-11-20T14:50:18,322 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41805 connecting to ZooKeeper ensemble=127.0.0.1:62617 2024-11-20T14:50:18,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:418050x0, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:50:18,368 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41805-0x1015a0310050000 connected 2024-11-20T14:50:18,429 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:18,431 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:18,432 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:50:18,433 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd, hbase.cluster.distributed=false 2024-11-20T14:50:18,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:50:18,434 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41805 2024-11-20T14:50:18,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41805 2024-11-20T14:50:18,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41805 2024-11-20T14:50:18,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41805 2024-11-20T14:50:18,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41805 2024-11-20T14:50:18,451 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a15ecfd95f4:0 server-side Connection retries=45 2024-11-20T14:50:18,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:50:18,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T14:50:18,451 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T14:50:18,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T14:50:18,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T14:50:18,451 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T14:50:18,452 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T14:50:18,452 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37253 2024-11-20T14:50:18,453 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37253 connecting to ZooKeeper ensemble=127.0.0.1:62617 2024-11-20T14:50:18,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:18,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:18,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372530x0, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T14:50:18,463 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:50:18,463 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37253-0x1015a0310050001 connected 2024-11-20T14:50:18,463 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T14:50:18,463 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T14:50:18,464 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T14:50:18,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T14:50:18,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37253 2024-11-20T14:50:18,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37253 
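[Editor's note] The ipc.RpcExecutor lines above describe bounded call queues (queueClass=java.util.concurrent.LinkedBlockingQueue, maxQueueLength=30) drained by a small, fixed pool of handler threads (handlerCount=1 or 3 per queue). The sketch below shows that bounded-queue/handler-pool pattern in plain java.util.concurrent terms; the class name and structure are illustrative and are not HBase's RpcExecutor.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

/** Illustrative sketch: a bounded call queue drained by a fixed number of handler threads. */
final class SimpleCallQueueExecutor {

  private final BlockingQueue<Runnable> callQueue;
  private final List<Thread> handlers = new ArrayList<>();
  private volatile boolean running = true;

  SimpleCallQueueExecutor(String threadPrefix, int handlerCount, int maxQueueLength) {
    this.callQueue = new LinkedBlockingQueue<>(maxQueueLength); // bounded, like maxQueueLength=30
    for (int i = 0; i < handlerCount; i++) {
      Thread t = new Thread(this::runLoop, threadPrefix + ".handler-" + i);
      t.setDaemon(true);
      t.start();
      handlers.add(t);
    }
  }

  /** Rejects the call when the queue is full instead of blocking the caller. */
  boolean dispatch(Runnable call) {
    return running && callQueue.offer(call);
  }

  private void runLoop() {
    try {
      while (running) {
        Runnable call = callQueue.take(); // handlers block until work arrives
        call.run();
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  void stop() {
    running = false;
    handlers.forEach(Thread::interrupt);
  }
}

For example, new SimpleCallQueueExecutor("default.FPBQ.Fifo", 3, 30) would mirror the handlerCount=3 / maxQueueLength=30 settings logged for the default queue; the priority.RWQ entries simply split work across separate read and write queues of the same shape.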
2024-11-20T14:50:18,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37253 2024-11-20T14:50:18,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37253 2024-11-20T14:50:18,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37253 2024-11-20T14:50:18,480 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a15ecfd95f4:41805 2024-11-20T14:50:18,480 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:18,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:50:18,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:50:18,488 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:18,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T14:50:18,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,496 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T14:50:18,496 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a15ecfd95f4,41805,1732114218320 from backup master directory 2024-11-20T14:50:18,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:50:18,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:18,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T14:50:18,504 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:50:18,504 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:18,508 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/hbase.id] with ID: a8df8ee0-26f5-49b2-aa35-6ebda8d2b394 2024-11-20T14:50:18,508 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/.tmp/hbase.id 2024-11-20T14:50:18,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:50:18,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741826_1002 (size=42) 2024-11-20T14:50:18,515 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/.tmp/hbase.id]:[hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/hbase.id] 2024-11-20T14:50:18,526 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:18,526 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-20T14:50:18,528 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
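[Editor's note] The FSUtils lines above show the cluster ID being written to a temporary location (.tmp/hbase.id) and then moved to its final path, so readers never observe a partially written file. A hedged sketch of that write-then-rename step using the Hadoop FileSystem API follows; the ClusterIdWriter name and the exact path layout are illustrative, taken only from the paths printed in the log.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative: publish a small marker file by writing to a temp path, then renaming it. */
final class ClusterIdWriter {

  static void writeAndPublish(Configuration conf, Path rootDir, String clusterId) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(rootDir, ".tmp/hbase.id"); // temporary location, as in the log
    Path dst = new Path(rootDir, "hbase.id");      // final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // "Move the temporary cluster ID file to its target location": rename is the publish step.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to rename " + tmp + " to " + dst);
    }
  }

  private ClusterIdWriter() {}
}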
2024-11-20T14:50:18,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:50:18,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741827_1003 (size=196) 2024-11-20T14:50:18,543 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T14:50:18,544 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T14:50:18,544 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:50:18,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:50:18,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741828_1004 (size=1189) 2024-11-20T14:50:18,552 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store 2024-11-20T14:50:18,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:50:18,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741829_1005 (size=34) 2024-11-20T14:50:18,560 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:50:18,560 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:50:18,560 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:18,560 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:18,560 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:50:18,560 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:18,560 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
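[Editor's note] The HRegion close sequence logged just above ("Closing ... disabling compactions & flushes", "Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled", "Closed") follows a familiar shutdown pattern: in-flight operations hold a shared lock, and close takes the exclusive lock with a time limit. The sketch below illustrates that pattern with a ReentrantReadWriteLock; it is not HBase's HRegion implementation, and the names are invented for illustration.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Illustrative close-lock pattern: operations take the read lock, close takes the write lock. */
final class ClosableRegion {

  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private volatile boolean closed = false;

  /** Normal operations hold the read lock so close() waits for them to drain. */
  void mutate(Runnable op) {
    closeLock.readLock().lock();
    try {
      if (closed) {
        throw new IllegalStateException("region is closed");
      }
      op.run();
    } finally {
      closeLock.readLock().unlock();
    }
  }

  /** Time-limited wait for the close lock, mirroring the "Time limited wait" log line. */
  boolean close(long timeout, TimeUnit unit) throws InterruptedException {
    if (!closeLock.writeLock().tryLock(timeout, unit)) {
      return false; // could not acquire the close lock in time
    }
    try {
      closed = true; // "Updates disabled for region ..."
      // flushing and resource release would happen here before reporting "Closed"
      return true;
    } finally {
      closeLock.writeLock().unlock();
    }
  }
}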
2024-11-20T14:50:18,560 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114218560Disabling compacts and flushes for region at 1732114218560Disabling writes for close at 1732114218560Writing region close event to WAL at 1732114218560Closed at 1732114218560 2024-11-20T14:50:18,561 WARN [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/.initializing 2024-11-20T14:50:18,561 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/WALs/1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:18,563 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C41805%2C1732114218320, suffix=, logDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/WALs/1a15ecfd95f4,41805,1732114218320, archiveDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/oldWALs, maxLogs=10 2024-11-20T14:50:18,564 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C41805%2C1732114218320.1732114218564 2024-11-20T14:50:18,568 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/WALs/1a15ecfd95f4,41805,1732114218320/1a15ecfd95f4%2C41805%2C1732114218320.1732114218564 2024-11-20T14:50:18,569 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35977:35977),(127.0.0.1/127.0.0.1:37565:37565)] 2024-11-20T14:50:18,569 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:50:18,570 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:50:18,570 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,570 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,572 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T14:50:18,572 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,573 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:18,573 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T14:50:18,574 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:50:18,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T14:50:18,576 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:50:18,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T14:50:18,578 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T14:50:18,578 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,579 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,579 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,580 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,580 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,581 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T14:50:18,582 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T14:50:18,587 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:50:18,588 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786333, jitterRate=-1.2677907943725586E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T14:50:18,588 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732114218570Initializing all the Stores at 1732114218571 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114218571Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114218571Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114218571Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114218571Cleaning up temporary data from old regions at 1732114218580 (+9 ms)Region opened successfully at 1732114218588 (+8 ms) 2024-11-20T14:50:18,589 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T14:50:18,591 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@782c9b08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:50:18,592 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-20T14:50:18,592 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T14:50:18,592 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T14:50:18,593 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T14:50:18,593 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-20T14:50:18,593 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-20T14:50:18,593 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T14:50:18,600 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T14:50:18,601 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T14:50:18,637 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-20T14:50:18,637 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T14:50:18,638 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T14:50:18,645 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-20T14:50:18,646 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T14:50:18,647 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T14:50:18,654 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-20T14:50:18,655 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T14:50:18,662 DEBUG 
[master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T14:50:18,666 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T14:50:18,670 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T14:50:18,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:50:18,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T14:50:18,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,680 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a15ecfd95f4,41805,1732114218320, sessionid=0x1015a0310050000, setting cluster-up flag (Was=false) 2024-11-20T14:50:18,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,720 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T14:50:18,722 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:18,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:18,762 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T14:50:18,764 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:18,778 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-20T14:50:18,779 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-20T14:50:18,780 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-20T14:50:18,780 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T14:50:18,780 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a15ecfd95f4,41805,1732114218320 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T14:50:18,781 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:50:18,781 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:50:18,781 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:50:18,781 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=5, maxPoolSize=5 2024-11-20T14:50:18,781 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a15ecfd95f4:0, corePoolSize=10, maxPoolSize=10 2024-11-20T14:50:18,781 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,781 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:50:18,782 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a15ecfd95f4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-20T14:50:18,782 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732114248782 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T14:50:18,783 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:50:18,783 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,783 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T14:50:18,784 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T14:50:18,784 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T14:50:18,784 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T14:50:18,784 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T14:50:18,784 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114218784,5,FailOnTimeoutGroup] 2024-11-20T14:50:18,784 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,784 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114218784,5,FailOnTimeoutGroup] 2024-11-20T14:50:18,784 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-20T14:50:18,785 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T14:50:18,785 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,785 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,784 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T14:50:18,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:50:18,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741831_1007 (size=1321) 2024-11-20T14:50:18,792 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-20T14:50:18,792 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd 2024-11-20T14:50:18,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:50:18,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741832_1008 (size=32) 2024-11-20T14:50:18,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:50:18,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:50:18,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:50:18,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:18,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-20T14:50:18,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:50:18,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:18,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:50:18,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:50:18,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:18,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:50:18,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:50:18,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:18,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:18,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:50:18,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740 2024-11-20T14:50:18,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740 2024-11-20T14:50:18,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:50:18,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:50:18,808 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
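The FlushLargeStoresPolicy lines above (for master:store and hbase:meta) report that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the policy falls back to the region memstore flush size divided by the number of families. A minimal sketch, assuming the standard TableDescriptorBuilder API, of how that per-table property could be supplied explicitly; the table name, family name, and the 16 MB value are illustrative assumptions, and only the property key comes from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  // Sets an explicit per-column-family flush lower bound on a hypothetical table.
  public static TableDescriptor withExplicitLowerBound() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        // Property key taken from the log message; 16 MB is an illustrative value.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
  }
}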
2024-11-20T14:50:18,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:50:18,811 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T14:50:18,811 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766586, jitterRate=-0.02523653209209442}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:50:18,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732114218798Initializing all the Stores at 1732114218799 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114218799Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114218799Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114218799Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114218799Cleaning up temporary data from old regions at 1732114218807 (+8 ms)Region opened successfully at 1732114218812 (+5 ms) 2024-11-20T14:50:18,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:50:18,812 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:50:18,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:50:18,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:50:18,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:50:18,812 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:50:18,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114218812Disabling compacts and flushes for region at 1732114218812Disabling writes for close at 1732114218812Writing region close 
event to WAL at 1732114218812Closed at 1732114218812 2024-11-20T14:50:18,813 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:50:18,813 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-20T14:50:18,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T14:50:18,815 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:50:18,816 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T14:50:18,868 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(746): ClusterId : a8df8ee0-26f5-49b2-aa35-6ebda8d2b394 2024-11-20T14:50:18,868 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T14:50:18,880 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T14:50:18,880 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T14:50:18,888 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T14:50:18,888 DEBUG [RS:0;1a15ecfd95f4:37253 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15290a24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a15ecfd95f4/172.17.0.2:0 2024-11-20T14:50:18,905 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a15ecfd95f4:37253 2024-11-20T14:50:18,905 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-20T14:50:18,905 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-20T14:50:18,905 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-20T14:50:18,906 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a15ecfd95f4,41805,1732114218320 with port=37253, startcode=1732114218451 2024-11-20T14:50:18,906 DEBUG [RS:0;1a15ecfd95f4:37253 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T14:50:18,908 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49823, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T14:50:18,908 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41805 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:18,908 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41805 {}] master.ServerManager(517): Registering regionserver=1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:18,909 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd 2024-11-20T14:50:18,909 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37801 2024-11-20T14:50:18,909 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-20T14:50:18,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:50:18,921 DEBUG [RS:0;1a15ecfd95f4:37253 {}] zookeeper.ZKUtil(111): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:18,921 WARN [RS:0;1a15ecfd95f4:37253 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T14:50:18,921 INFO [RS:0;1a15ecfd95f4:37253 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:50:18,921 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:18,921 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a15ecfd95f4,37253,1732114218451] 2024-11-20T14:50:18,925 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T14:50:18,926 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T14:50:18,927 INFO [RS:0;1a15ecfd95f4:37253 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T14:50:18,927 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
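The PressureAwareCompactionThroughputController line above reports a 100.00 MB/s higher and 50.00 MB/s lower compaction throughput bound with a 60000 ms tuning period. A minimal sketch of how such bounds could be set in the regionserver configuration; the two property keys are assumed to be the ones this controller reads (they are not printed in the log), and the byte values simply mirror the reported bounds.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static Configuration withReportedBounds() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys for PressureAwareCompactionThroughputController; values mirror the log.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}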
2024-11-20T14:50:18,927 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-20T14:50:18,928 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-20T14:50:18,928 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=2, maxPoolSize=2 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a15ecfd95f4:0, corePoolSize=1, maxPoolSize=1 2024-11-20T14:50:18,928 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:50:18,929 DEBUG [RS:0;1a15ecfd95f4:37253 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a15ecfd95f4:0, corePoolSize=3, maxPoolSize=3 2024-11-20T14:50:18,929 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-20T14:50:18,929 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,929 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,929 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,929 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,929 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,37253,1732114218451-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:50:18,948 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T14:50:18,948 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,37253,1732114218451-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,949 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,949 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.Replication(171): 1a15ecfd95f4,37253,1732114218451 started 2024-11-20T14:50:18,966 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:18,966 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1482): Serving as 1a15ecfd95f4,37253,1732114218451, RpcServer on 1a15ecfd95f4/172.17.0.2:37253, sessionid=0x1015a0310050001 2024-11-20T14:50:18,966 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T14:50:18,966 DEBUG [RS:0;1a15ecfd95f4:37253 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:18,966 WARN [1a15ecfd95f4:41805 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-20T14:50:18,966 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,37253,1732114218451' 2024-11-20T14:50:18,966 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T14:50:18,967 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T14:50:18,967 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T14:50:18,967 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T14:50:18,967 DEBUG [RS:0;1a15ecfd95f4:37253 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:18,967 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a15ecfd95f4,37253,1732114218451' 2024-11-20T14:50:18,967 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T14:50:18,967 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T14:50:18,968 DEBUG [RS:0;1a15ecfd95f4:37253 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T14:50:18,968 INFO [RS:0;1a15ecfd95f4:37253 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T14:50:18,968 INFO [RS:0;1a15ecfd95f4:37253 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
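The two quota manager lines above show RPC and space quota support disabled on this regionserver, which matches the default. A minimal sketch, assuming hbase.quota.enabled is the switch these managers consult, of how quota support could be turned on in configuration for a run that needs it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaSupportSketch {
  public static Configuration withQuotasEnabled() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed switch read by RegionServerRpcQuotaManager / RegionServerSpaceQuotaManager.
    conf.setBoolean("hbase.quota.enabled", true);
    return conf;
  }
}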
2024-11-20T14:50:19,069 INFO [RS:0;1a15ecfd95f4:37253 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C37253%2C1732114218451, suffix=, logDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/1a15ecfd95f4,37253,1732114218451, archiveDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/oldWALs, maxLogs=32 2024-11-20T14:50:19,070 INFO [RS:0;1a15ecfd95f4:37253 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C37253%2C1732114218451.1732114219070 2024-11-20T14:50:19,075 INFO [RS:0;1a15ecfd95f4:37253 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/1a15ecfd95f4,37253,1732114218451/1a15ecfd95f4%2C37253%2C1732114218451.1732114219070 2024-11-20T14:50:19,075 DEBUG [RS:0;1a15ecfd95f4:37253 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35977:35977),(127.0.0.1/127.0.0.1:37565:37565)] 2024-11-20T14:50:19,216 DEBUG [1a15ecfd95f4:41805 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T14:50:19,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:19,218 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,37253,1732114218451, state=OPENING 2024-11-20T14:50:19,229 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T14:50:19,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:19,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:19,238 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T14:50:19,238 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:50:19,238 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:50:19,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,37253,1732114218451}] 2024-11-20T14:50:19,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,34813,1732114020050/1a15ecfd95f4%2C34813%2C1732114020050.1732114020311 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:19,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44451/user/jenkins/test-data/ed170cd0-9776-82b5-c6f2-a56a895ba6b3/WALs/1a15ecfd95f4,45919,1732114018559/1a15ecfd95f4%2C45919%2C1732114018559.meta.1732114019807.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-20T14:50:19,391 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T14:50:19,393 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43469, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T14:50:19,396 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-20T14:50:19,396 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:50:19,398 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a15ecfd95f4%2C37253%2C1732114218451.meta, suffix=.meta, logDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/1a15ecfd95f4,37253,1732114218451, archiveDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/oldWALs, maxLogs=32 2024-11-20T14:50:19,399 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1a15ecfd95f4%2C37253%2C1732114218451.meta.1732114219399.meta 2024-11-20T14:50:19,409 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/1a15ecfd95f4,37253,1732114218451/1a15ecfd95f4%2C37253%2C1732114218451.meta.1732114219399.meta 2024-11-20T14:50:19,418 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37565:37565),(127.0.0.1/127.0.0.1:35977:35977)] 2024-11-20T14:50:19,429 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T14:50:19,429 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T14:50:19,429 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T14:50:19,429 
INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T14:50:19,429 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T14:50:19,429 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T14:50:19,429 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-20T14:50:19,429 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-20T14:50:19,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T14:50:19,431 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T14:50:19,431 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:19,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:19,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-20T14:50:19,433 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-20T14:50:19,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:19,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:19,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T14:50:19,434 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T14:50:19,434 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:19,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:19,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T14:50:19,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T14:50:19,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T14:50:19,435 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T14:50:19,435 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-20T14:50:19,436 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740 2024-11-20T14:50:19,437 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740 2024-11-20T14:50:19,438 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-20T14:50:19,438 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-20T14:50:19,438 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T14:50:19,440 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-20T14:50:19,440 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838430, jitterRate=0.06611977517604828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T14:50:19,440 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-20T14:50:19,441 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732114219430Writing region info on filesystem at 1732114219430Initializing all the Stores at 1732114219430Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114219430Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114219430Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732114219430Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732114219430Cleaning up temporary data from old regions at 1732114219438 (+8 ms)Running coprocessor post-open hooks at 1732114219440 (+2 ms)Region opened successfully at 1732114219441 (+1 ms) 2024-11-20T14:50:19,442 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732114219391 2024-11-20T14:50:19,444 DEBUG [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T14:50:19,444 INFO [RS_OPEN_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-20T14:50:19,444 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:19,445 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a15ecfd95f4,37253,1732114218451, state=OPEN 2024-11-20T14:50:19,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:50:19,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T14:50:19,501 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:19,501 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:50:19,501 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T14:50:19,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T14:50:19,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a15ecfd95f4,37253,1732114218451 in 263 msec 2024-11-20T14:50:19,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T14:50:19,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 690 msec 2024-11-20T14:50:19,508 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-20T14:50:19,508 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-20T14:50:19,510 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:50:19,510 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,37253,1732114218451, seqNum=-1] 2024-11-20T14:50:19,510 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:50:19,511 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50249, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:50:19,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 736 msec 2024-11-20T14:50:19,516 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732114219516, completionTime=-1 2024-11-20T14:50:19,516 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T14:50:19,516 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-20T14:50:19,518 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-20T14:50:19,518 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732114279518 2024-11-20T14:50:19,518 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732114339518 2024-11-20T14:50:19,518 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-20T14:50:19,519 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,41805,1732114218320-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:19,519 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,41805,1732114218320-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:19,519 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,41805,1732114218320-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T14:50:19,519 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a15ecfd95f4:41805, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:19,519 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:19,520 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-20T14:50:19,521 DEBUG [master/1a15ecfd95f4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.019sec 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,41805,1732114218320-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T14:50:19,523 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,41805,1732114218320-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T14:50:19,525 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-20T14:50:19,525 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T14:50:19,525 INFO [master/1a15ecfd95f4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a15ecfd95f4,41805,1732114218320-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
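[Editor's note] The entries above record the master finishing initialization and its chores starting; the entries just below show the test declaring "Minicluster is up". As a point of reference, a minimal sketch of the kind of JUnit harness whose setup and teardown would produce this lifecycle is given here. It is an assumption-laden illustration, not the test from this run: HBaseTestingUtil#shutdownMiniCluster appears in the tear-down call stacks later in this log, while startMiniCluster() and getConnection() are assumed to match the older HBaseTestingUtility API.

// Hedged sketch of a minicluster-based test harness (assumed API, see note above).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts ZooKeeper, HDFS, one master and one region server; ends with the
    // "Minicluster is up; activeMaster=..." line seen a few entries below.
    util.startMiniCluster();
  }

  @Test
  public void clusterComesUp() throws Exception {
    // Obtaining a client connection is enough to confirm hbase:meta was assigned
    // (procedures pid=1..3 in the entries above).
    Assert.assertNotNull(util.getConnection());
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors AbstractTestLogRolling.tearDown() in the shutdown call stacks below:
    // close connections, stop the region server, then the master.
    util.shutdownMiniCluster();
  }
}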
2024-11-20T14:50:19,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41581ad6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:50:19,569 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a15ecfd95f4,41805,-1 for getting cluster id 2024-11-20T14:50:19,569 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-20T14:50:19,570 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a8df8ee0-26f5-49b2-aa35-6ebda8d2b394' 2024-11-20T14:50:19,570 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-20T14:50:19,571 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a8df8ee0-26f5-49b2-aa35-6ebda8d2b394" 2024-11-20T14:50:19,571 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@532648ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:50:19,571 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a15ecfd95f4,41805,-1] 2024-11-20T14:50:19,571 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-20T14:50:19,571 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:19,572 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52790, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-20T14:50:19,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@203b129c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T14:50:19,573 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-20T14:50:19,574 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a15ecfd95f4,37253,1732114218451, seqNum=-1] 2024-11-20T14:50:19,575 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T14:50:19,576 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38174, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T14:50:19,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:19,578 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T14:50:19,580 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-20T14:50:19,580 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-20T14:50:19,582 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/test.com,8080,1, archiveDir=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/oldWALs, maxLogs=32 2024-11-20T14:50:19,583 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732114219582 2024-11-20T14:50:19,588 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/test.com,8080,1/test.com%2C8080%2C1.1732114219582 2024-11-20T14:50:19,588 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37565:37565),(127.0.0.1/127.0.0.1:35977:35977)] 2024-11-20T14:50:19,589 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732114219589 2024-11-20T14:50:19,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,598 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/test.com,8080,1/test.com%2C8080%2C1.1732114219582 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/test.com,8080,1/test.com%2C8080%2C1.1732114219589 2024-11-20T14:50:19,598 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35977:35977),(127.0.0.1/127.0.0.1:37565:37565)] 2024-11-20T14:50:19,598 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/test.com,8080,1/test.com%2C8080%2C1.1732114219582 is not closed yet, will try archiving it next time 2024-11-20T14:50:19,599 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,599 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741835_1011 (size=93) 2024-11-20T14:50:19,599 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,599 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741835_1011 (size=93) 2024-11-20T14:50:19,599 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,600 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/WALs/test.com,8080,1/test.com%2C8080%2C1.1732114219582 to hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/oldWALs/test.com%2C8080%2C1.1732114219582 2024-11-20T14:50:19,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741836_1012 (size=93) 2024-11-20T14:50:19,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741836_1012 (size=93) 2024-11-20T14:50:19,603 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/oldWALs 2024-11-20T14:50:19,603 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732114219589) 2024-11-20T14:50:19,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-20T14:50:19,603 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-20T14:50:19,603 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:50:19,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:19,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:19,603 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-20T14:50:19,604 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T14:50:19,604 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=496264051, stopped=false 2024-11-20T14:50:19,604 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a15ecfd95f4,41805,1732114218320 2024-11-20T14:50:19,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:50:19,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T14:50:19,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:19,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:19,628 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:50:19,629 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-20T14:50:19,629 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:50:19,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:19,629 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:50:19,629 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T14:50:19,629 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a15ecfd95f4,37253,1732114218451' ***** 2024-11-20T14:50:19,629 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-20T14:50:19,629 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T14:50:19,630 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(959): stopping server 1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a15ecfd95f4:37253. 2024-11-20T14:50:19,630 DEBUG [RS:0;1a15ecfd95f4:37253 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T14:50:19,630 DEBUG [RS:0;1a15ecfd95f4:37253 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-20T14:50:19,630 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-20T14:50:19,630 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T14:50:19,631 DEBUG [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-20T14:50:19,631 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-20T14:50:19,631 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-20T14:50:19,631 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-20T14:50:19,631 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T14:50:19,631 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T14:50:19,631 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-20T14:50:19,651 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740/.tmp/ns/b0260ad3cdd54d4c8b11e1dd36c8ed2f is 43, key is default/ns:d/1732114219512/Put/seqid=0 2024-11-20T14:50:19,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741837_1013 (size=5153) 2024-11-20T14:50:19,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741837_1013 (size=5153) 2024-11-20T14:50:19,656 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740/.tmp/ns/b0260ad3cdd54d4c8b11e1dd36c8ed2f 2024-11-20T14:50:19,661 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740/.tmp/ns/b0260ad3cdd54d4c8b11e1dd36c8ed2f as hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740/ns/b0260ad3cdd54d4c8b11e1dd36c8ed2f 2024-11-20T14:50:19,666 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740/ns/b0260ad3cdd54d4c8b11e1dd36c8ed2f, entries=2, sequenceid=6, filesize=5.0 K 2024-11-20T14:50:19,667 INFO 
[RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false 2024-11-20T14:50:19,667 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T14:50:19,671 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T14:50:19,671 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T14:50:19,671 INFO [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-20T14:50:19,671 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732114219631Running coprocessor pre-close hooks at 1732114219631Disabling compacts and flushes for region at 1732114219631Disabling writes for close at 1732114219631Obtaining lock to block concurrent updates at 1732114219631Preparing flush snapshotting stores in 1588230740 at 1732114219631Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732114219631Flushing stores of hbase:meta,,1.1588230740 at 1732114219632 (+1 ms)Flushing 1588230740/ns: creating writer at 1732114219632Flushing 1588230740/ns: appending metadata at 1732114219651 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732114219651Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3fc64249: reopening flushed file at 1732114219661 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false at 1732114219667 (+6 ms)Writing region close event to WAL at 1732114219668 (+1 ms)Running coprocessor post-close hooks at 1732114219671 (+3 ms)Closed at 1732114219671 2024-11-20T14:50:19,671 DEBUG [RS_CLOSE_META-regionserver/1a15ecfd95f4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T14:50:19,831 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(976): stopping server 1a15ecfd95f4,37253,1732114218451; all regions closed. 
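[Editor's note] The flush logged above (memstore snapshot, .tmp HFile, commit under the "ns" family, updated seqid) can also be driven and observed from a client. The sketch below is a hedged illustration only: Admin#flush(TableName) and the FileSystem listing calls are public APIs, but the HDFS layout (data/hbase/meta/1588230740/ns) is copied from the paths in this log and the root path is an assumption that would normally come from hbase.rootdir.

// Hedged sketch: force a flush and list the resulting store files.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndListStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers the same memstore -> .tmp HFile -> committed store file sequence
      // that the region server performs on close in the entries above.
      admin.flush(TableName.META_TABLE_NAME);
    }
    // Assumed layout: <rootdir>/data/<namespace>/<table>/<encoded region>/<family>/
    FileSystem fs = FileSystem.get(conf);
    Path nsFamily = new Path("/hbase/data/hbase/meta/1588230740/ns");
    for (FileStatus f : fs.listStatus(nsFamily)) {
      System.out.println(f.getPath() + " " + f.getLen() + " bytes");
    }
  }
}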
2024-11-20T14:50:19,831 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,832 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,832 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,832 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,832 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741834_1010 (size=1152) 2024-11-20T14:50:19,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741834_1010 (size=1152) 2024-11-20T14:50:19,838 DEBUG [RS:0;1a15ecfd95f4:37253 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/oldWALs 2024-11-20T14:50:19,838 INFO [RS:0;1a15ecfd95f4:37253 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C37253%2C1732114218451.meta:.meta(num 1732114219399) 2024-11-20T14:50:19,838 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,838 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,839 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,839 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,839 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:19,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741833_1009 (size=93) 2024-11-20T14:50:19,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741833_1009 (size=93) 2024-11-20T14:50:19,844 DEBUG [RS:0;1a15ecfd95f4:37253 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/oldWALs 2024-11-20T14:50:19,844 INFO [RS:0;1a15ecfd95f4:37253 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1a15ecfd95f4%2C37253%2C1732114218451:(num 1732114219070) 2024-11-20T14:50:19,844 DEBUG [RS:0;1a15ecfd95f4:37253 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T14:50:19,844 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T14:50:19,844 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:50:19,844 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.ChoreService(370): Chore service for: regionserver/1a15ecfd95f4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-20T14:50:19,844 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:50:19,844 INFO [regionserver/1a15ecfd95f4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-20T14:50:19,844 INFO [RS:0;1a15ecfd95f4:37253 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37253 2024-11-20T14:50:19,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T14:50:19,870 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:50:19,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a15ecfd95f4,37253,1732114218451 2024-11-20T14:50:19,879 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a15ecfd95f4,37253,1732114218451] 2024-11-20T14:50:19,929 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a15ecfd95f4,37253,1732114218451 already deleted, retry=false 2024-11-20T14:50:19,929 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a15ecfd95f4,37253,1732114218451 expired; onlineServers=0 2024-11-20T14:50:19,929 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a15ecfd95f4,41805,1732114218320' ***** 2024-11-20T14:50:19,929 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T14:50:19,929 INFO [M:0;1a15ecfd95f4:41805 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-20T14:50:19,929 INFO [M:0;1a15ecfd95f4:41805 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-20T14:50:19,929 DEBUG [M:0;1a15ecfd95f4:41805 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T14:50:19,929 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-20T14:50:19,929 DEBUG [M:0;1a15ecfd95f4:41805 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T14:50:19,929 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114218784 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.large.0-1732114218784,5,FailOnTimeoutGroup] 2024-11-20T14:50:19,929 DEBUG [master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114218784 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a15ecfd95f4:0:becomeActiveMaster-HFileCleaner.small.0-1732114218784,5,FailOnTimeoutGroup] 2024-11-20T14:50:19,929 INFO [M:0;1a15ecfd95f4:41805 {}] hbase.ChoreService(370): Chore service for: master/1a15ecfd95f4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-20T14:50:19,930 INFO [M:0;1a15ecfd95f4:41805 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-20T14:50:19,930 DEBUG [M:0;1a15ecfd95f4:41805 {}] master.HMaster(1795): Stopping service threads 2024-11-20T14:50:19,930 INFO [M:0;1a15ecfd95f4:41805 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T14:50:19,930 INFO [M:0;1a15ecfd95f4:41805 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-20T14:50:19,930 INFO [M:0;1a15ecfd95f4:41805 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T14:50:19,930 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-20T14:50:19,937 DEBUG [M:0;1a15ecfd95f4:41805 {}] zookeeper.ZKUtil(347): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T14:50:19,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T14:50:19,937 WARN [M:0;1a15ecfd95f4:41805 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T14:50:19,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T14:50:19,938 INFO [M:0;1a15ecfd95f4:41805 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/.lastflushedseqids 2024-11-20T14:50:19,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741838_1014 (size=99) 2024-11-20T14:50:19,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741838_1014 (size=99) 2024-11-20T14:50:19,944 INFO [M:0;1a15ecfd95f4:41805 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-20T14:50:19,944 INFO [M:0;1a15ecfd95f4:41805 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T14:50:19,945 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T14:50:19,945 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:19,945 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:19,945 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T14:50:19,945 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:19,945 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-20T14:50:19,961 DEBUG [M:0;1a15ecfd95f4:41805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9e993f3af07e490984aa5b0314c0c5ac is 82, key is hbase:meta,,1/info:regioninfo/1732114219444/Put/seqid=0 2024-11-20T14:50:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741839_1015 (size=5672) 2024-11-20T14:50:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741839_1015 (size=5672) 2024-11-20T14:50:19,965 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9e993f3af07e490984aa5b0314c0c5ac 2024-11-20T14:50:19,979 INFO [RS:0;1a15ecfd95f4:37253 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:50:19,979 INFO [RS:0;1a15ecfd95f4:37253 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a15ecfd95f4,37253,1732114218451; zookeeper connection closed. 
2024-11-20T14:50:19,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:19,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37253-0x1015a0310050001, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:19,979 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1bae0f85 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1bae0f85 2024-11-20T14:50:19,980 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T14:50:19,984 DEBUG [M:0;1a15ecfd95f4:41805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5da9a65be1cb47cd950c58a2eb184462 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732114219515/Put/seqid=0 2024-11-20T14:50:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741840_1016 (size=5275) 2024-11-20T14:50:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741840_1016 (size=5275) 2024-11-20T14:50:19,989 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5da9a65be1cb47cd950c58a2eb184462 2024-11-20T14:50:20,006 DEBUG [M:0;1a15ecfd95f4:41805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2a8c4e88aa664eedad4a6c6c197915b1 is 69, key is 1a15ecfd95f4,37253,1732114218451/rs:state/1732114218908/Put/seqid=0 2024-11-20T14:50:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741841_1017 (size=5156) 2024-11-20T14:50:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741841_1017 (size=5156) 2024-11-20T14:50:20,010 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2a8c4e88aa664eedad4a6c6c197915b1 2024-11-20T14:50:20,031 DEBUG [M:0;1a15ecfd95f4:41805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0540b192014e45ebb8fdeb4cd422c3aa is 52, key is load_balancer_on/state:d/1732114219579/Put/seqid=0 2024-11-20T14:50:20,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741842_1018 (size=5056) 
2024-11-20T14:50:20,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741842_1018 (size=5056) 2024-11-20T14:50:20,036 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0540b192014e45ebb8fdeb4cd422c3aa 2024-11-20T14:50:20,041 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9e993f3af07e490984aa5b0314c0c5ac as hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9e993f3af07e490984aa5b0314c0c5ac 2024-11-20T14:50:20,047 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9e993f3af07e490984aa5b0314c0c5ac, entries=8, sequenceid=29, filesize=5.5 K 2024-11-20T14:50:20,048 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5da9a65be1cb47cd950c58a2eb184462 as hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5da9a65be1cb47cd950c58a2eb184462 2024-11-20T14:50:20,052 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5da9a65be1cb47cd950c58a2eb184462, entries=3, sequenceid=29, filesize=5.2 K 2024-11-20T14:50:20,053 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2a8c4e88aa664eedad4a6c6c197915b1 as hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2a8c4e88aa664eedad4a6c6c197915b1 2024-11-20T14:50:20,057 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2a8c4e88aa664eedad4a6c6c197915b1, entries=1, sequenceid=29, filesize=5.0 K 2024-11-20T14:50:20,058 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0540b192014e45ebb8fdeb4cd422c3aa as hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0540b192014e45ebb8fdeb4cd422c3aa 2024-11-20T14:50:20,063 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37801/user/jenkins/test-data/eb41c6d0-e884-f2cc-6009-317e82047ecd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0540b192014e45ebb8fdeb4cd422c3aa, entries=1, sequenceid=29, filesize=4.9 K 2024-11-20T14:50:20,064 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=29, compaction requested=false 2024-11-20T14:50:20,065 INFO [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T14:50:20,065 DEBUG [M:0;1a15ecfd95f4:41805 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732114219945Disabling compacts and flushes for region at 1732114219945Disabling writes for close at 1732114219945Obtaining lock to block concurrent updates at 1732114219945Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732114219945Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732114219945Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732114219946 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732114219946Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732114219961 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732114219961Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732114219969 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732114219984 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732114219984Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732114219992 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732114220005 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732114220005Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732114220015 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732114220031 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732114220031Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42a2ddae: reopening flushed file at 1732114220040 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3060ba3b: reopening flushed file at 1732114220047 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e1076d: reopening flushed file at 1732114220052 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52bff27d: reopening flushed file at 1732114220058 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=29, compaction requested=false at 1732114220064 (+6 ms)Writing region close event to WAL at 1732114220065 (+1 ms)Closed at 1732114220065 2024-11-20T14:50:20,065 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:20,065 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:20,065 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:20,065 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:20,065 INFO 
[sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-20T14:50:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43765 is added to blk_1073741830_1006 (size=10311) 2024-11-20T14:50:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46457 is added to blk_1073741830_1006 (size=10311) 2024-11-20T14:50:20,068 INFO [M:0;1a15ecfd95f4:41805 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-20T14:50:20,068 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-20T14:50:20,068 INFO [M:0;1a15ecfd95f4:41805 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41805 2024-11-20T14:50:20,068 INFO [M:0;1a15ecfd95f4:41805 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-20T14:50:20,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:20,179 INFO [M:0;1a15ecfd95f4:41805 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-20T14:50:20,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41805-0x1015a0310050000, quorum=127.0.0.1:62617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T14:50:20,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4030687e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:50:20,181 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11685cbc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:50:20,181 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:50:20,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@395250f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:50:20,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7460995c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.log.dir/,STOPPED} 2024-11-20T14:50:20,183 WARN [BP-2050894618-172.17.0.2-1732114216497 heartbeating to localhost/127.0.0.1:37801 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:50:20,183 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T14:50:20,183 WARN [BP-2050894618-172.17.0.2-1732114216497 heartbeating to localhost/127.0.0.1:37801 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2050894618-172.17.0.2-1732114216497 (Datanode Uuid 5d2cbc10-2843-4d33-8ae6-a17394b88c1b) service to localhost/127.0.0.1:37801 2024-11-20T14:50:20,183 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:50:20,184 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data3/current/BP-2050894618-172.17.0.2-1732114216497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:20,184 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data4/current/BP-2050894618-172.17.0.2-1732114216497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:20,184 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:50:20,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fb8d998{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T14:50:20,186 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4fe8057e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:50:20,186 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:50:20,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19b99f6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:50:20,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@255c5192{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.log.dir/,STOPPED} 2024-11-20T14:50:20,187 WARN [BP-2050894618-172.17.0.2-1732114216497 heartbeating to localhost/127.0.0.1:37801 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T14:50:20,187 WARN [BP-2050894618-172.17.0.2-1732114216497 heartbeating to localhost/127.0.0.1:37801 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2050894618-172.17.0.2-1732114216497 (Datanode Uuid f781ca05-fa3d-4dcb-b438-9650e67bdf98) service to localhost/127.0.0.1:37801 2024-11-20T14:50:20,188 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data1/current/BP-2050894618-172.17.0.2-1732114216497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:20,188 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/cluster_e333dd5e-1d35-bc26-a1a6-7df22f7220fe/data/data2/current/BP-2050894618-172.17.0.2-1732114216497 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T14:50:20,188 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-20T14:50:20,188 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T14:50:20,188 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T14:50:20,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3264995b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T14:50:20,194 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ea1ad77{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T14:50:20,194 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T14:50:20,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d15ac40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T14:50:20,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@280c54a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1b2d1453-aad4-15db-fbea-88d480df0a47/hadoop.log.dir/,STOPPED} 2024-11-20T14:50:20,200 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-20T14:50:20,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-20T14:50:20,225 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 228) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37801 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:37801 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:37801 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37801 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:37801 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:37801 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37801 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37801 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=533 (was 506) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=214 (was 214), ProcessCount=11 (was 11), AvailableMemoryMB=9149 (was 9158)