2024-12-09 14:24:52,931 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-09 14:24:52,946 main DEBUG Took 0.012413 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 14:24:52,946 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 14:24:52,947 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 14:24:52,948 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 14:24:52,950 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,959 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 14:24:52,981 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,983 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,984 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,984 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,985 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,985 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,986 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,987 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,987 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,988 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,989 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,989 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,990 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,990 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 14:24:52,991 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,992 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,992 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,993 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,994 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,995 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,996 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 14:24:52,997 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:52,997 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 14:24:52,999 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 14:24:53,001 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 14:24:53,003 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 14:24:53,004 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 14:24:53,005 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 14:24:53,006 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 14:24:53,017 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 14:24:53,021 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 14:24:53,026 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 14:24:53,026 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 14:24:53,027 main DEBUG createAppenders(={Console}) 2024-12-09 14:24:53,028 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-09 14:24:53,028 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-09 14:24:53,029 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-09 14:24:53,029 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 14:24:53,030 main DEBUG OutputStream closed 2024-12-09 14:24:53,030 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 14:24:53,031 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 14:24:53,031 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-09 14:24:53,135 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 14:24:53,137 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 14:24:53,138 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 14:24:53,140 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 14:24:53,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 14:24:53,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 14:24:53,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 14:24:53,142 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 14:24:53,142 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 14:24:53,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 14:24:53,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 14:24:53,144 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 14:24:53,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 14:24:53,145 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 14:24:53,145 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 14:24:53,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 14:24:53,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 14:24:53,147 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 14:24:53,150 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 14:24:53,151 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-09 14:24:53,151 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 14:24:53,152 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-09T14:24:53,527 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394 2024-12-09 14:24:53,530 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 14:24:53,530 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
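The initialization block above shows the PropertiesConfiguration loaded from log4j2.properties inside the hbase-logging tests jar: per-package logger levels (for example org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG, org.apache.zookeeper=ERROR), a root logger at "INFO,Console", and a PatternLayout of "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n" feeding the Console appender. As a rough illustration only (not the actual test configuration, which builds the HBase-specific HBaseTestAppender targeting SYSTEM_ERR), a minimal Log4j 2 ConfigurationBuilder sketch reproducing the same levels and pattern with a stock console appender might look like this:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public final class TestLoggingSketch {

  /** Builds a configuration equivalent in spirit to the one logged above. */
  public static LoggerContext configure() {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Stock Console appender standing in for HBaseTestAppender(target=SYSTEM_ERR, maxSize=1G).
    AppenderComponentBuilder console = builder.newAppender("Console", "Console")
        .add(builder.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    builder.add(console);

    // A few of the per-package levels taken from the LoggerConfig$Builder lines above.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

    // Root logger: level INFO with an AppenderRef to "Console" (the "INFO,Console" seen above).
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));

    return Configurator.initialize(builder.build());
  }
}
```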
2024-12-09T14:24:53,545 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-09T14:24:53,596 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=476, ProcessCount=11, AvailableMemoryMB=6454 2024-12-09T14:24:53,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T14:24:53,622 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469, deleteOnExit=true 2024-12-09T14:24:53,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T14:24:53,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/test.cache.data in system properties and HBase conf 2024-12-09T14:24:53,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T14:24:53,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir in system properties and HBase conf 2024-12-09T14:24:53,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T14:24:53,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T14:24:53,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T14:24:53,753 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T14:24:53,891 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T14:24:53,896 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:24:53,897 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:24:53,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T14:24:53,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:24:53,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T14:24:53,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T14:24:53,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:24:53,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:24:53,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T14:24:53,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/nfs.dump.dir in system properties and HBase conf 2024-12-09T14:24:53,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/java.io.tmpdir in system properties and HBase conf 2024-12-09T14:24:53,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:24:53,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T14:24:53,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T14:24:54,388 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:24:54,716 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T14:24:54,801 INFO [Time-limited test {}] log.Log(170): Logging initialized @2658ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T14:24:54,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:24:54,947 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:24:54,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:24:54,968 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:24:54,969 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:24:54,983 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:24:54,985 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:24:54,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:24:55,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/java.io.tmpdir/jetty-localhost-45937-hadoop-hdfs-3_4_1-tests_jar-_-any-7997131486415920842/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:24:55,182 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:45937} 2024-12-09T14:24:55,182 INFO [Time-limited test {}] server.Server(415): Started @3040ms 2024-12-09T14:24:55,210 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:24:55,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:24:55,568 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:24:55,572 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:24:55,572 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:24:55,573 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:24:55,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:24:55,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:24:55,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/java.io.tmpdir/jetty-localhost-46423-hadoop-hdfs-3_4_1-tests_jar-_-any-2722789951095683944/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:24:55,702 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:46423} 2024-12-09T14:24:55,703 INFO [Time-limited test {}] server.Server(415): Started @3560ms 2024-12-09T14:24:55,759 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:24:55,884 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:24:55,890 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:24:55,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:24:55,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:24:55,894 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:24:55,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:24:55,896 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:24:56,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/java.io.tmpdir/jetty-localhost-36815-hadoop-hdfs-3_4_1-tests_jar-_-any-5762126251193625558/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:24:56,049 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:36815} 2024-12-09T14:24:56,049 INFO [Time-limited test {}] server.Server(415): Started @3907ms 2024-12-09T14:24:56,051 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
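By this point HBaseTestingUtil has brought up the mini DFS requested by the StartMiniClusterOption logged earlier (one namenode plus the two datanodes from numDataNodes=2) along with their Jetty web UIs. Purely as an illustrative sketch of the pattern this startup reflects, assuming the HBase 3.x test API named in the log (exact builder methods may differ between versions), a test would typically request such a cluster like this:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option string logged earlier:
    // StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);
    try {
      // ... exercise the cluster, e.g. via util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```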
2024-12-09T14:24:56,209 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data1/current/BP-334055039-172.17.0.3-1733754294478/current, will proceed with Du for space computation calculation, 2024-12-09T14:24:56,209 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data4/current/BP-334055039-172.17.0.3-1733754294478/current, will proceed with Du for space computation calculation, 2024-12-09T14:24:56,209 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data3/current/BP-334055039-172.17.0.3-1733754294478/current, will proceed with Du for space computation calculation, 2024-12-09T14:24:56,210 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data2/current/BP-334055039-172.17.0.3-1733754294478/current, will proceed with Du for space computation calculation, 2024-12-09T14:24:56,275 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:24:56,279 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:24:56,362 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79bcd9977a46522a with lease ID 0x1cf4d13f3b61969f: Processing first storage report for DS-4266e32f-b99b-4431-bad2-6fde9b399897 from datanode DatanodeRegistration(127.0.0.1:39635, datanodeUuid=a5b89841-b0d0-4cbe-9121-d1a427f33509, infoPort=45321, infoSecurePort=0, ipcPort=43751, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478) 2024-12-09T14:24:56,364 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79bcd9977a46522a with lease ID 0x1cf4d13f3b61969f: from storage DS-4266e32f-b99b-4431-bad2-6fde9b399897 node DatanodeRegistration(127.0.0.1:39635, datanodeUuid=a5b89841-b0d0-4cbe-9121-d1a427f33509, infoPort=45321, infoSecurePort=0, ipcPort=43751, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T14:24:56,364 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x70d7f36b179b5bd1 with lease ID 0x1cf4d13f3b61969e: Processing first storage report for DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861 from datanode DatanodeRegistration(127.0.0.1:38529, datanodeUuid=640d0cff-c6b1-425c-af40-f6d4ccb7e7fb, infoPort=37545, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478) 2024-12-09T14:24:56,364 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x70d7f36b179b5bd1 with lease ID 0x1cf4d13f3b61969e: from storage DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861 node DatanodeRegistration(127.0.0.1:38529, datanodeUuid=640d0cff-c6b1-425c-af40-f6d4ccb7e7fb, infoPort=37545, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:24:56,365 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79bcd9977a46522a with lease ID 0x1cf4d13f3b61969f: Processing first storage report for DS-6fb241f9-f86a-4699-b813-1c6bd9b056fc from datanode DatanodeRegistration(127.0.0.1:39635, datanodeUuid=a5b89841-b0d0-4cbe-9121-d1a427f33509, infoPort=45321, infoSecurePort=0, ipcPort=43751, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478) 2024-12-09T14:24:56,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79bcd9977a46522a with lease ID 0x1cf4d13f3b61969f: from storage DS-6fb241f9-f86a-4699-b813-1c6bd9b056fc node DatanodeRegistration(127.0.0.1:39635, datanodeUuid=a5b89841-b0d0-4cbe-9121-d1a427f33509, infoPort=45321, infoSecurePort=0, ipcPort=43751, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:24:56,365 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x70d7f36b179b5bd1 with lease ID 0x1cf4d13f3b61969e: Processing first storage report for DS-fe27b63c-87e2-491f-9fc0-447472eb94dc from datanode DatanodeRegistration(127.0.0.1:38529, datanodeUuid=640d0cff-c6b1-425c-af40-f6d4ccb7e7fb, infoPort=37545, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478) 2024-12-09T14:24:56,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x70d7f36b179b5bd1 with lease ID 0x1cf4d13f3b61969e: from storage DS-fe27b63c-87e2-491f-9fc0-447472eb94dc node DatanodeRegistration(127.0.0.1:38529, datanodeUuid=640d0cff-c6b1-425c-af40-f6d4ccb7e7fb, infoPort=37545, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=1118983752;c=1733754294478), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:24:56,443 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394 2024-12-09T14:24:56,517 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/zookeeper_0, clientPort=62538, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T14:24:56,526 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62538 2024-12-09T14:24:56,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:24:56,538 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:24:56,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:24:56,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:24:57,158 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d with version=8 2024-12-09T14:24:57,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase-staging 2024-12-09T14:24:57,246 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T14:24:57,535 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:24:57,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:24:57,549 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:24:57,555 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:24:57,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:24:57,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:24:57,739 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T14:24:57,826 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T14:24:57,843 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T14:24:57,848 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:24:57,886 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28731 (auto-detected) 2024-12-09T14:24:57,887 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-09T14:24:57,917 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:32895 2024-12-09T14:24:57,952 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32895 connecting to ZooKeeper ensemble=127.0.0.1:62538 2024-12-09T14:24:58,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328950x0, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:24:58,004 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32895-0x1012b92a8d30000 connected 2024-12-09T14:24:58,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:24:58,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:24:58,086 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:24:58,091 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d, hbase.cluster.distributed=false 2024-12-09T14:24:58,120 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:24:58,125 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32895 2024-12-09T14:24:58,125 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32895 2024-12-09T14:24:58,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32895 2024-12-09T14:24:58,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32895 2024-12-09T14:24:58,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32895 2024-12-09T14:24:58,242 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:24:58,243 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:24:58,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:24:58,244 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:24:58,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:24:58,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:24:58,247 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:24:58,249 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:24:58,250 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36625 2024-12-09T14:24:58,252 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36625 connecting to ZooKeeper ensemble=127.0.0.1:62538 2024-12-09T14:24:58,253 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:24:58,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:24:58,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366250x0, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:24:58,265 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:24:58,265 
DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36625-0x1012b92a8d30001 connected 2024-12-09T14:24:58,269 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T14:24:58,277 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T14:24:58,279 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T14:24:58,284 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:24:58,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36625 2024-12-09T14:24:58,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36625 2024-12-09T14:24:58,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36625 2024-12-09T14:24:58,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36625 2024-12-09T14:24:58,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36625 2024-12-09T14:24:58,301 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f4e784dc7cb5:32895 2024-12-09T14:24:58,302 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f4e784dc7cb5,32895,1733754297296 2024-12-09T14:24:58,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:24:58,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:24:58,311 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f4e784dc7cb5,32895,1733754297296 2024-12-09T14:24:58,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T14:24:58,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:58,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
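The master (bound to port 32895) and the region server (port 36625) have each connected to the MiniZooKeeperCluster on client port 62538, set watchers under the /hbase base znode, and the master has registered under /hbase/backup-masters before taking the active role. For orientation only, a hedged client-side sketch (standard HBase client API, not code from this test) of connecting to a cluster exposed on that quorum and client port would be:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and client port as logged: ensemble=127.0.0.1:62538, baseZNode=/hbase.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 62538);
    conf.set("zookeeper.znode.parent", "/hbase");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("Cluster ID: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```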
2024-12-09T14:24:58,334 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T14:24:58,335 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f4e784dc7cb5,32895,1733754297296 from backup master directory 2024-12-09T14:24:58,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:24:58,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f4e784dc7cb5,32895,1733754297296 2024-12-09T14:24:58,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:24:58,339 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:24:58,339 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f4e784dc7cb5,32895,1733754297296 2024-12-09T14:24:58,341 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T14:24:58,342 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T14:24:58,406 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase.id] with ID: e1382ac6-cbf0-4095-aef9-14b1bba81a31 2024-12-09T14:24:58,406 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/.tmp/hbase.id 2024-12-09T14:24:58,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:24:58,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:24:58,419 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/.tmp/hbase.id]:[hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase.id] 2024-12-09T14:24:58,460 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:24:58,465 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T14:24:58,487 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 20ms. 2024-12-09T14:24:58,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:58,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:58,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:24:58,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:24:58,529 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:24:58,531 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T14:24:58,537 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:24:58,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:24:58,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:24:58,585 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store 2024-12-09T14:24:58,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:24:58,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:24:58,610 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T14:24:58,614 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:24:58,616 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:24:58,616 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:24:58,617 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:24:58,618 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:24:58,619 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:24:58,619 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
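The descriptor dumped above for the local 'master:store' region lists four column families (info, proc, rs, state) with their versions, encodings, bloom filters, and block sizes. As a hedged illustration of how an equivalent descriptor is expressed with the public client API (showing only the 'info' family; values copied from the logged descriptor, not from this test's source):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // 'info' family as logged: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom,
    // IN_MEMORY=true, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}
```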
2024-12-09T14:24:58,621 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754298616Disabling compacts and flushes for region at 1733754298616Disabling writes for close at 1733754298619 (+3 ms)Writing region close event to WAL at 1733754298619Closed at 1733754298619 2024-12-09T14:24:58,623 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/.initializing 2024-12-09T14:24:58,623 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/WALs/f4e784dc7cb5,32895,1733754297296 2024-12-09T14:24:58,648 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C32895%2C1733754297296, suffix=, logDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/WALs/f4e784dc7cb5,32895,1733754297296, archiveDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/oldWALs, maxLogs=10 2024-12-09T14:24:58,658 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C32895%2C1733754297296.1733754298654 2024-12-09T14:24:58,686 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/WALs/f4e784dc7cb5,32895,1733754297296/f4e784dc7cb5%2C32895%2C1733754297296.1733754298654 2024-12-09T14:24:58,700 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:37545:37545)] 2024-12-09T14:24:58,711 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:24:58,711 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:24:58,716 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,717 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,776 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T14:24:58,810 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:58,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:58,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T14:24:58,817 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:58,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:24:58,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T14:24:58,821 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:58,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:24:58,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T14:24:58,826 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:58,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:24:58,827 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,832 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,833 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,840 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,840 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,844 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T14:24:58,848 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:24:58,854 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:24:58,855 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723236, jitterRate=-0.08035856485366821}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T14:24:58,864 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733754298737Initializing all the Stores at 1733754298740 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754298741 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754298742 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754298743 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754298743Cleaning up temporary data from old regions at 1733754298840 (+97 ms)Region opened successfully at 1733754298863 (+23 ms) 2024-12-09T14:24:58,865 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T14:24:58,900 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37974927, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:24:58,932 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T14:24:58,943 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T14:24:58,943 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T14:24:58,946 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T14:24:58,948 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T14:24:58,952 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-09T14:24:58,952 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T14:24:58,978 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T14:24:58,986 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T14:24:58,988 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T14:24:58,991 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T14:24:58,992 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T14:24:58,995 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T14:24:58,997 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T14:24:59,001 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T14:24:59,003 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T14:24:59,004 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T14:24:59,005 
DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T14:24:59,022 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T14:24:59,024 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T14:24:59,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:24:59,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:24:59,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,032 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f4e784dc7cb5,32895,1733754297296, sessionid=0x1012b92a8d30000, setting cluster-up flag (Was=false) 2024-12-09T14:24:59,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,052 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T14:24:59,054 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,32895,1733754297296 2024-12-09T14:24:59,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,073 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T14:24:59,075 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,32895,1733754297296 2024-12-09T14:24:59,081 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T14:24:59,091 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(746): ClusterId : e1382ac6-cbf0-4095-aef9-14b1bba81a31 2024-12-09T14:24:59,093 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:24:59,098 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:24:59,098 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:24:59,102 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:24:59,102 DEBUG [RS:0;f4e784dc7cb5:36625 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@579036d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:24:59,116 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f4e784dc7cb5:36625 2024-12-09T14:24:59,119 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:24:59,119 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:24:59,119 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T14:24:59,122 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,32895,1733754297296 with port=36625, startcode=1733754298203 2024-12-09T14:24:59,133 DEBUG [RS:0;f4e784dc7cb5:36625 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:24:59,157 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T14:24:59,168 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T14:24:59,179 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
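Editor's note: the FlushLargeStoresPolicy and normalizer lines above reference two configuration keys directly (hbase.hregion.percolumnfamilyflush.size.lower.bound, which the log falls back from to memstore-flush-size divided by the number of families, and hbase.normalizer.merge.min_region_size.mb, which it bumps from 0 to 1). The sketch below shows, under the assumption that one wants to set them explicitly, how those two keys could be configured; the concrete values are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndNormalizerTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Per-column-family flush lower bound, in bytes. When unset, the log shows
        // the fallback of memstore flush size / #families (32 MB for master:store).
        // 16 MB here is an arbitrary illustrative value.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                16L * 1024 * 1024);

        // Normalizer merge threshold in MB; the log shows this being updated from 0 to 1.
        conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);

        System.out.println(conf.get("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        System.out.println(conf.get("hbase.normalizer.merge.min_region_size.mb"));
    }
}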
2024-12-09T14:24:59,185 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f4e784dc7cb5,32895,1733754297296 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T14:24:59,194 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:24:59,194 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:24:59,194 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:24:59,195 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:24:59,195 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f4e784dc7cb5:0, corePoolSize=10, maxPoolSize=10 2024-12-09T14:24:59,195 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,195 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:24:59,195 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,196 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733754329196 2024-12-09T14:24:59,199 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T14:24:59,199 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39351, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:24:59,200 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T14:24:59,202 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:24:59,203 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T14:24:59,204 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T14:24:59,204 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T14:24:59,204 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T14:24:59,204 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T14:24:59,205 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,208 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T14:24:59,209 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T14:24:59,209 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T14:24:59,206 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32895 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T14:24:59,211 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,211 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T14:24:59,211 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T14:24:59,212 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T14:24:59,214 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754299213,5,FailOnTimeoutGroup] 2024-12-09T14:24:59,214 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754299214,5,FailOnTimeoutGroup] 2024-12-09T14:24:59,215 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,215 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T14:24:59,216 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,217 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
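Editor's note: the HMaster line above says that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of doing so follows; the key comes verbatim from the log, and the threshold of 300 is an arbitrary illustrative number, not guidance.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountThreshold {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // The master log reports the feature as off until this threshold is > 0.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 300);

        System.out.println("storeFileRefCount threshold = "
                + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}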
2024-12-09T14:24:59,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:24:59,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:24:59,229 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T14:24:59,230 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d 2024-12-09T14:24:59,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:24:59,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:24:59,245 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T14:24:59,245 WARN [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-09T14:24:59,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:24:59,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:24:59,250 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:24:59,250 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:59,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:24:59,253 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:24:59,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:59,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:24:59,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:24:59,257 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:59,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:24:59,260 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:24:59,260 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:59,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:24:59,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740 2024-12-09T14:24:59,263 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740 2024-12-09T14:24:59,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-12-09T14:24:59,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:24:59,266 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:24:59,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:24:59,273 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:24:59,273 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868165, jitterRate=0.10392925143241882}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:24:59,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733754299246Initializing all the Stores at 1733754299247 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754299248 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754299248Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754299248Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754299248Cleaning up temporary data from old regions at 1733754299266 (+18 ms)Region opened successfully at 1733754299275 (+9 ms) 2024-12-09T14:24:59,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:24:59,276 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:24:59,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:24:59,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:24:59,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-09T14:24:59,277 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:24:59,278 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754299276Disabling compacts and flushes for region at 1733754299276Disabling writes for close at 1733754299276Writing region close event to WAL at 1733754299277 (+1 ms)Closed at 1733754299277 2024-12-09T14:24:59,281 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:24:59,281 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T14:24:59,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T14:24:59,295 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:24:59,298 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T14:24:59,347 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,32895,1733754297296 with port=36625, startcode=1733754298203 2024-12-09T14:24:59,349 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32895 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,36625,1733754298203 2024-12-09T14:24:59,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32895 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,36625,1733754298203 2024-12-09T14:24:59,360 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d 2024-12-09T14:24:59,360 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44241 2024-12-09T14:24:59,361 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:24:59,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:24:59,365 DEBUG [RS:0;f4e784dc7cb5:36625 {}] zookeeper.ZKUtil(111): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,36625,1733754298203 2024-12-09T14:24:59,366 WARN [RS:0;f4e784dc7cb5:36625 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T14:24:59,366 INFO [RS:0;f4e784dc7cb5:36625 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:24:59,366 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203 2024-12-09T14:24:59,368 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,36625,1733754298203] 2024-12-09T14:24:59,392 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:24:59,405 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:24:59,411 INFO [RS:0;f4e784dc7cb5:36625 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:24:59,411 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,412 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:24:59,418 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:24:59,419 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,419 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,420 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,420 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,420 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,420 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,420 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:24:59,420 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,420 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,421 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T14:24:59,421 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,421 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,421 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:24:59,421 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:24:59,421 DEBUG [RS:0;f4e784dc7cb5:36625 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:24:59,422 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,422 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,423 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,423 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,423 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,423 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,36625,1733754298203-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:24:59,444 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:24:59,446 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,36625,1733754298203-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,446 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:24:59,447 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.Replication(171): f4e784dc7cb5,36625,1733754298203 started 2024-12-09T14:24:59,449 WARN [f4e784dc7cb5:32895 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T14:24:59,465 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
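Editor's note: the MemStoreFlusher and PressureAwareCompactionThroughputController lines above print only resolved values (globalMemStoreLimit=880 M; compaction throughput bounds of 100 MB/s and 50 MB/s). The sketch below shows one plausible way those figures could be configured; the property keys are assumptions inferred from the class names in the log, not keys the log itself prints, and the values simply mirror what the log reports.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ThroughputAndMemstoreLimits {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Compaction throughput bounds in bytes/second (log shows 100 MB/s and 50 MB/s).
        // Assumed keys for PressureAwareCompactionThroughputController.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

        // Global memstore limit as a fraction of heap; the 880 M / 836 M figures in the
        // log are derived from a fraction like this, not configured as absolute sizes.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    }
}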
2024-12-09T14:24:59,466 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,36625,1733754298203, RpcServer on f4e784dc7cb5/172.17.0.3:36625, sessionid=0x1012b92a8d30001 2024-12-09T14:24:59,467 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:24:59,467 DEBUG [RS:0;f4e784dc7cb5:36625 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,36625,1733754298203 2024-12-09T14:24:59,467 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,36625,1733754298203' 2024-12-09T14:24:59,467 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:24:59,468 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:24:59,469 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:24:59,469 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:24:59,469 DEBUG [RS:0;f4e784dc7cb5:36625 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f4e784dc7cb5,36625,1733754298203 2024-12-09T14:24:59,469 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,36625,1733754298203' 2024-12-09T14:24:59,469 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:24:59,470 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:24:59,471 DEBUG [RS:0;f4e784dc7cb5:36625 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:24:59,471 INFO [RS:0;f4e784dc7cb5:36625 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:24:59,471 INFO [RS:0;f4e784dc7cb5:36625 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T14:24:59,579 INFO [RS:0;f4e784dc7cb5:36625 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C36625%2C1733754298203, suffix=, logDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203, archiveDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs, maxLogs=32 2024-12-09T14:24:59,581 INFO [RS:0;f4e784dc7cb5:36625 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754299581 2024-12-09T14:24:59,590 INFO [RS:0;f4e784dc7cb5:36625 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754299581 2024-12-09T14:24:59,591 DEBUG [RS:0;f4e784dc7cb5:36625 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37545:37545),(127.0.0.1/127.0.0.1:45321:45321)] 2024-12-09T14:24:59,701 DEBUG [f4e784dc7cb5:32895 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T14:24:59,715 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f4e784dc7cb5,36625,1733754298203 2024-12-09T14:24:59,723 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,36625,1733754298203, state=OPENING 2024-12-09T14:24:59,729 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T14:24:59,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:24:59,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:24:59,732 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:24:59,733 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:24:59,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,36625,1733754298203}] 2024-12-09T14:24:59,912 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T14:24:59,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59583, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T14:24:59,927 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T14:24:59,927 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:24:59,930 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C36625%2C1733754298203.meta, suffix=.meta, logDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203, archiveDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs, maxLogs=32 2024-12-09T14:24:59,932 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.meta.1733754299932.meta 2024-12-09T14:24:59,940 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.meta.1733754299932.meta 2024-12-09T14:24:59,942 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37545:37545),(127.0.0.1/127.0.0.1:45321:45321)] 2024-12-09T14:24:59,944 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:24:59,946 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T14:24:59,948 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T14:24:59,953 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
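
The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines (one for the server WAL earlier, one here for the meta WAL) reflect the standard FSHLog sizing settings. A minimal sketch of where those numbers typically come from, assuming the usual hbase-site.xml keys rather than anything read out of this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size -> the "blocksize=256 MB" reported above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll threshold as a fraction of the block size: 256 MB * 0.5 -> "rollsize=128 MB".
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on un-archived WAL files per server -> "maxLogs=32".
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println("maxlogs=" + conf.getInt("hbase.regionserver.maxlogs", -1));
      }
    }

With a 256 MB block size and a 0.5 roll multiplier, a WAL is rolled once it reaches roughly 128 MB, matching the rollsize reported above.
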
2024-12-09T14:24:59,957 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T14:24:59,957 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:24:59,958 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T14:24:59,958 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T14:24:59,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:24:59,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:24:59,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:59,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:24:59,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:24:59,966 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:59,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:24:59,969 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:24:59,969 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:24:59,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:24:59,972 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:24:59,972 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:24:59,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
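
The CompactionConfiguration dump repeated above for each meta column family (info, ns, rep_barrier, table) shows the stock tuning values: minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0. A minimal sketch of the configuration keys these values usually map to (the key names are the standard ones, assumed rather than taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Files below this size are always eligible; matches "minCompactSize:128 MB" above.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }
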
2024-12-09T14:24:59,973 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:24:59,974 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740 2024-12-09T14:24:59,978 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740 2024-12-09T14:24:59,981 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:24:59,981 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:24:59,982 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:24:59,986 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:24:59,988 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822147, jitterRate=0.04541471600532532}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:24:59,989 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T14:24:59,991 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733754299958Writing region info on filesystem at 1733754299958Initializing all the Stores at 1733754299960 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754299960Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754299961 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754299961Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754299961Cleaning up temporary data from old regions at 1733754299981 (+20 ms)Running coprocessor post-open hooks at 1733754299989 (+8 ms)Region opened successfully at 1733754299990 (+1 ms) 2024-12-09T14:25:00,000 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733754299903 2024-12-09T14:25:00,017 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T14:25:00,017 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T14:25:00,021 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,36625,1733754298203 2024-12-09T14:25:00,025 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,36625,1733754298203, state=OPEN 2024-12-09T14:25:00,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:25:00,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:25:00,033 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:25:00,033 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:25:00,034 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,36625,1733754298203 2024-12-09T14:25:00,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T14:25:00,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,36625,1733754298203 in 299 msec 2024-12-09T14:25:00,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T14:25:00,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 757 msec 2024-12-09T14:25:00,053 DEBUG [PEWorker-5 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:25:00,053 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T14:25:00,079 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:25:00,081 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,36625,1733754298203, seqNum=-1] 2024-12-09T14:25:00,104 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:25:00,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50439, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:25:00,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0150 sec 2024-12-09T14:25:00,129 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733754300129, completionTime=-1 2024-12-09T14:25:00,133 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T14:25:00,133 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T14:25:00,165 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T14:25:00,165 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733754360165 2024-12-09T14:25:00,165 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733754420165 2024-12-09T14:25:00,165 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 32 msec 2024-12-09T14:25:00,169 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,32895,1733754297296-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:25:00,169 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,32895,1733754297296-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:25:00,169 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,32895,1733754297296-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:25:00,171 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f4e784dc7cb5:32895, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:25:00,172 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T14:25:00,172 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T14:25:00,179 DEBUG [master/f4e784dc7cb5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T14:25:00,201 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.862sec 2024-12-09T14:25:00,202 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T14:25:00,203 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T14:25:00,204 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T14:25:00,205 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T14:25:00,205 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T14:25:00,206 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,32895,1733754297296-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:25:00,206 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,32895,1733754297296-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T14:25:00,215 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T14:25:00,216 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T14:25:00,217 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,32895,1733754297296-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:25:00,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48e858bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:25:00,303 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T14:25:00,303 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T14:25:00,306 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f4e784dc7cb5,32895,-1 for getting cluster id 2024-12-09T14:25:00,309 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T14:25:00,318 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e1382ac6-cbf0-4095-aef9-14b1bba81a31' 2024-12-09T14:25:00,321 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T14:25:00,321 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e1382ac6-cbf0-4095-aef9-14b1bba81a31" 2024-12-09T14:25:00,323 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5679148d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:25:00,324 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f4e784dc7cb5,32895,-1] 2024-12-09T14:25:00,326 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T14:25:00,328 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:25:00,330 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60076, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T14:25:00,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b20e2b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:25:00,334 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:25:00,340 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,36625,1733754298203, seqNum=-1] 2024-12-09T14:25:00,341 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:25:00,343 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32788, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:25:00,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=f4e784dc7cb5,32895,1733754297296 2024-12-09T14:25:00,390 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:25:00,399 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T14:25:00,404 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T14:25:00,413 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is f4e784dc7cb5,32895,1733754297296 2024-12-09T14:25:00,416 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@571ad612 2024-12-09T14:25:00,417 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T14:25:00,420 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60086, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T14:25:00,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32895 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T14:25:00,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32895 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
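
The two TableDescriptorChecker warnings above fire because the test deliberately asks for a tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and log rolls happen quickly. A hedged sketch of building such a descriptor with the standard client API, using the table and family names from the create request that follows; everything else here is illustrative rather than the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallRegionTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
              .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
              .build();
          admin.createTable(desc);          // drives the CreateTableProcedure seen below
        }
      }
    }
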
2024-12-09T14:25:00,427 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32895 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:25:00,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32895 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-09T14:25:00,441 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T14:25:00,443 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32895 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-09T14:25:00,443 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:25:00,445 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T14:25:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:25:00,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741835_1011 (size=389) 2024-12-09T14:25:00,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741835_1011 (size=389) 2024-12-09T14:25:00,495 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => da2e5e20807643dc49235f0603d84567, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d 2024-12-09T14:25:00,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741836_1012 (size=72) 2024-12-09T14:25:00,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741836_1012 (size=72) 2024-12-09T14:25:00,506 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:25:00,507 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing da2e5e20807643dc49235f0603d84567, disabling compactions & flushes 2024-12-09T14:25:00,507 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:25:00,507 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:25:00,507 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. after waiting 0 ms 2024-12-09T14:25:00,507 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:25:00,507 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:25:00,507 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for da2e5e20807643dc49235f0603d84567: Waiting for close lock at 1733754300507Disabling compacts and flushes for region at 1733754300507Disabling writes for close at 1733754300507Writing region close event to WAL at 1733754300507Closed at 1733754300507 2024-12-09T14:25:00,509 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T14:25:00,514 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733754300509"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733754300509"}]},"ts":"1733754300509"} 2024-12-09T14:25:00,520 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
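
The repeated "Checking to see if procedure is done pid=4" polling and the procedure state transitions (CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT, CREATE_TABLE_ADD_TO_META, ...) are the master-side view of a single create-table request. Purely as an illustration, and assuming the single-argument createTableAsync overload of Admin is available in this client version, the same request could be driven asynchronously and awaited like this:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class AsyncCreateSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build();
          // The returned Future completes only once the master reports the
          // CreateTableProcedure (pid=4 above) as finished.
          admin.createTableAsync(desc).get(30, TimeUnit.SECONDS);
        }
      }
    }
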
2024-12-09T14:25:00,522 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T14:25:00,524 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754300522"}]},"ts":"1733754300522"} 2024-12-09T14:25:00,529 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-09T14:25:00,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da2e5e20807643dc49235f0603d84567, ASSIGN}] 2024-12-09T14:25:00,534 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da2e5e20807643dc49235f0603d84567, ASSIGN 2024-12-09T14:25:00,536 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da2e5e20807643dc49235f0603d84567, ASSIGN; state=OFFLINE, location=f4e784dc7cb5,36625,1733754298203; forceNewPlan=false, retain=false 2024-12-09T14:25:00,687 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da2e5e20807643dc49235f0603d84567, regionState=OPENING, regionLocation=f4e784dc7cb5,36625,1733754298203 2024-12-09T14:25:00,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da2e5e20807643dc49235f0603d84567, ASSIGN because future has completed 2024-12-09T14:25:00,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da2e5e20807643dc49235f0603d84567, server=f4e784dc7cb5,36625,1733754298203}] 2024-12-09T14:25:00,854 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 
2024-12-09T14:25:00,855 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => da2e5e20807643dc49235f0603d84567, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:25:00,855 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,855 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:25:00,856 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,856 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,858 INFO [StoreOpener-da2e5e20807643dc49235f0603d84567-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,860 INFO [StoreOpener-da2e5e20807643dc49235f0603d84567-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da2e5e20807643dc49235f0603d84567 columnFamilyName info 2024-12-09T14:25:00,860 DEBUG [StoreOpener-da2e5e20807643dc49235f0603d84567-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:25:00,861 INFO [StoreOpener-da2e5e20807643dc49235f0603d84567-1 {}] regionserver.HStore(327): Store=da2e5e20807643dc49235f0603d84567/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:25:00,862 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,863 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,864 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,865 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,865 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,868 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,872 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:25:00,873 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened da2e5e20807643dc49235f0603d84567; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749698, jitterRate=-0.046710580587387085}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T14:25:00,873 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:00,874 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for da2e5e20807643dc49235f0603d84567: Running coprocessor pre-open hook at 1733754300856Writing region info on filesystem at 1733754300856Initializing all the Stores at 1733754300858 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754300858Cleaning up temporary data from old regions at 1733754300865 (+7 ms)Running coprocessor post-open hooks at 1733754300873 (+8 ms)Region opened successfully at 1733754300874 (+1 ms) 2024-12-09T14:25:00,876 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567., pid=6, masterSystemTime=1733754300848 2024-12-09T14:25:00,880 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:25:00,880 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:25:00,881 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da2e5e20807643dc49235f0603d84567, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,36625,1733754298203 2024-12-09T14:25:00,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da2e5e20807643dc49235f0603d84567, server=f4e784dc7cb5,36625,1733754298203 because future has completed 2024-12-09T14:25:00,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T14:25:00,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure da2e5e20807643dc49235f0603d84567, server=f4e784dc7cb5,36625,1733754298203 in 194 msec 2024-12-09T14:25:00,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T14:25:00,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da2e5e20807643dc49235f0603d84567, ASSIGN in 360 msec 2024-12-09T14:25:00,897 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T14:25:00,897 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754300897"}]},"ts":"1733754300897"} 2024-12-09T14:25:00,900 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-09T14:25:00,902 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T14:25:00,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 471 msec 2024-12-09T14:25:05,461 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T14:25:05,506 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T14:25:05,507 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-09T14:25:07,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T14:25:07,823 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T14:25:07,824 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T14:25:07,824 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T14:25:07,825 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:25:07,825 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T14:25:07,825 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T14:25:07,826 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T14:25:10,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:25:10,498 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-09T14:25:10,501 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-09T14:25:10,508 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-09T14:25:10,508 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 
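
The "Found 1 regions for table" and firstRegionName lines come from the test utility scanning hbase:meta for the new table's regions. A client-side equivalent, sketched with the standard Admin and RegionLocator APIs (the class and variable names are illustrative, not the test's own code):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLookupSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // One region expected, since the table was created without split keys.
          admin.getRegions(tn).forEach(r -> System.out.println(r.getRegionNameAsString()));
          // Locating a row yields the same region plus its hosting server.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }

Locating row0001 is what produces the "fetched location ... row='row0001' ... seqNum=2" line shortly below.
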
2024-12-09T14:25:10,509 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754310509 2024-12-09T14:25:10,518 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:10,518 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:10,518 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:10,519 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:10,519 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:10,519 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754299581 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754310509 2024-12-09T14:25:10,520 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37545:37545),(127.0.0.1/127.0.0.1:45321:45321)] 2024-12-09T14:25:10,521 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754299581 is not closed yet, will try archiving it next time 2024-12-09T14:25:10,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741833_1009 (size=451) 2024-12-09T14:25:10,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741833_1009 (size=451) 2024-12-09T14:25:10,524 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754299581 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs/f4e784dc7cb5%2C36625%2C1733754298203.1733754299581 2024-12-09T14:25:10,529 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567., hostname=f4e784dc7cb5,36625,1733754298203, seqNum=2] 2024-12-09T14:25:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36625 {}] regionserver.HRegion(8855): Flush requested on da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:22,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da2e5e20807643dc49235f0603d84567 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:25:22,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/cae6da6f77f541eb9b7c3feed61b9a63 is 1080, key is row0001/info:/1733754310532/Put/seqid=0 2024-12-09T14:25:22,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741838_1014 (size=12509) 2024-12-09T14:25:22,657 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741838_1014 (size=12509) 2024-12-09T14:25:22,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/cae6da6f77f541eb9b7c3feed61b9a63 2024-12-09T14:25:22,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/cae6da6f77f541eb9b7c3feed61b9a63 as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63 2024-12-09T14:25:22,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T14:25:22,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da2e5e20807643dc49235f0603d84567 in 161ms, sequenceid=11, compaction requested=false 2024-12-09T14:25:22,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da2e5e20807643dc49235f0603d84567: 2024-12-09T14:25:26,440 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
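The flush above stages the memstore snapshot as a file under the store's .tmp directory and only afterwards commits it into info/ by rename, so a crash mid-flush never leaves a partial store file visible to readers. A minimal local-filesystem sketch of that write-then-commit pattern follows; it uses only java.nio.file, and the class, method, and file names are hypothetical stand-ins, not HBase code:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Map;
import java.util.TreeMap;

/** Write-then-commit flush sketch: stage the snapshot under .tmp, then move it into the store dir. */
public final class TmpThenCommitFlush {

  /** Flushes a sorted snapshot to storeDir/fileName, staging it in storeDir/.tmp first. */
  static Path flush(Map<String, String> snapshot, Path storeDir, String fileName)
      throws IOException {
    Path tmpDir = storeDir.resolve(".tmp");
    Files.createDirectories(tmpDir);

    // 1. Write the whole snapshot to a temporary file; a crash here leaves only
    //    unreferenced garbage under .tmp, never a half-written store file.
    Path tmpFile = tmpDir.resolve(fileName);
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> cell : snapshot.entrySet()) {
      sb.append(cell.getKey()).append('=').append(cell.getValue()).append('\n');
    }
    Files.writeString(tmpFile, sb.toString());

    // 2. Commit: move the finished file into the store directory in one step.
    Path committed = storeDir.resolve(fileName);
    return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
  }

  public static void main(String[] args) throws IOException {
    Path storeDir = Files.createTempDirectory("info-store");
    Map<String, String> memstore = new TreeMap<>();
    memstore.put("row0001/info:", "value1");
    memstore.put("row0002/info:", "value2");
    Path file = flush(memstore, storeDir, "flush-0001");
    System.out.println("Committed " + file + " (" + Files.size(file) + " bytes)");
  }
}
```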
2024-12-09T14:25:30,577 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754330577 2024-12-09T14:25:30,788 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:25:30,789 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:30,789 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:30,789 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:30,789 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:30,789 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:30,790 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754310509 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754330577 2024-12-09T14:25:30,791 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:37545:37545)] 2024-12-09T14:25:30,791 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754310509 is not closed yet, will try archiving it next time 2024-12-09T14:25:30,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741837_1013 (size=12399) 2024-12-09T14:25:30,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741837_1013 (size=12399) 2024-12-09T14:25:30,995 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:33,200 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:35,404 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:37,608 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:37,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36625 {}] regionserver.HRegion(8855): Flush requested on da2e5e20807643dc49235f0603d84567 2024-12-09T14:25:37,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da2e5e20807643dc49235f0603d84567 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:25:37,811 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:37,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/c56a716f19f14f7cb94ee05c580bcd5f is 1080, key is row0008/info:/1733754324563/Put/seqid=0 2024-12-09T14:25:37,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741840_1016 (size=12509) 2024-12-09T14:25:37,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741840_1016 (size=12509) 2024-12-09T14:25:37,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/c56a716f19f14f7cb94ee05c580bcd5f 2024-12-09T14:25:37,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/c56a716f19f14f7cb94ee05c580bcd5f as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/c56a716f19f14f7cb94ee05c580bcd5f 2024-12-09T14:25:37,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/c56a716f19f14f7cb94ee05c580bcd5f, entries=7, sequenceid=21, filesize=12.2 K 2024-12-09T14:25:38,051 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:38,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da2e5e20807643dc49235f0603d84567 in 
442ms, sequenceid=21, compaction requested=false 2024-12-09T14:25:38,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da2e5e20807643dc49235f0603d84567: 2024-12-09T14:25:38,052 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-09T14:25:38,052 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:25:38,053 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63 because midkey is the same as first or last row 2024-12-09T14:25:39,812 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:40,275 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T14:25:40,275 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T14:25:42,016 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:42,018 WARN [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:42,019 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C36625%2C1733754298203:(num 1733754330577) roll requested 2024-12-09T14:25:42,020 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754342019 2024-12-09T14:25:42,228 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:25:42,228 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:42,228 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:42,229 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:42,229 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:42,229 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
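The WARN lines above show the two triggers for a roll request: a single sync slower than a hard limit (time-based, threshold=5000 ms later in the log) and an accumulation of merely slow syncs (count=8, threshold=5). The sketch below models that dual check in plain Java; the 100 ms "slow" cutoff is an assumption (the log only shows ~200 ms syncs being reported as slow), the other thresholds are the ones printed in the log, and the class is illustrative, not the FSHLog implementation:

```java
/**
 * Tracks WAL sync latencies and decides when to ask for a log roll, mirroring the two
 * triggers visible in the log: one very slow sync, or too many slow syncs since the last roll.
 */
public final class SlowSyncRollPolicy {
  private final long slowSyncMillis = 100;       // assumed cutoff for counting a sync as "slow"
  private final long rollOnSyncMillis = 5_000;   // a single sync this slow forces a roll (per the log)
  private final int slowSyncCountThreshold = 5;  // too many slow syncs also forces a roll (per the log)

  private int slowSyncCount;

  /** Records one sync and returns true if a log roll should be requested. */
  public boolean recordSync(long syncCostMillis) {
    if (syncCostMillis >= rollOnSyncMillis) {
      System.out.printf("Requesting log roll: time=%d ms, threshold=%d ms%n",
          syncCostMillis, rollOnSyncMillis);
      return true;
    }
    if (syncCostMillis >= slowSyncMillis) {
      slowSyncCount++;
      System.out.printf("Slow sync cost: %d ms (count=%d)%n", syncCostMillis, slowSyncCount);
      if (slowSyncCount > slowSyncCountThreshold) {
        System.out.printf("Requesting log roll: count=%d, threshold=%d%n",
            slowSyncCount, slowSyncCountThreshold);
        return true;
      }
    }
    return false;
  }

  /** Called after the WAL has actually been rolled. */
  public void onRoll() {
    slowSyncCount = 0;
  }

  public static void main(String[] args) {
    SlowSyncRollPolicy policy = new SlowSyncRollPolicy();
    long[] observed = {201, 201, 201, 201, 201, 201, 201, 201}; // a run of slow syncs, as in the log
    for (long cost : observed) {
      if (policy.recordSync(cost)) {
        policy.onRoll();
        break;
      }
    }
  }
}
```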
2024-12-09T14:25:42,229 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754330577 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754342019 2024-12-09T14:25:42,230 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37545:37545),(127.0.0.1/127.0.0.1:45321:45321)] 2024-12-09T14:25:42,230 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754330577 is not closed yet, will try archiving it next time 2024-12-09T14:25:42,230 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754310509 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs/f4e784dc7cb5%2C36625%2C1733754298203.1733754310509 2024-12-09T14:25:42,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741839_1015 (size=7739) 2024-12-09T14:25:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741839_1015 (size=7739) 2024-12-09T14:25:44,220 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:25:45,856 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region da2e5e20807643dc49235f0603d84567, had cached 0 bytes from a total of 25018 2024-12-09T14:25:46,424 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:25:48,628 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:25:50,832 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], 
DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:25:52,834 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T14:25:52,834 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754352834 2024-12-09T14:25:56,441 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T14:25:57,843 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:25:57,845 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:25:57,845 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C36625%2C1733754298203:(num 1733754352834) roll requested 2024-12-09T14:25:57,845 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:57,845 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:57,845 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:57,846 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:57,846 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:25:57,846 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754342019 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754352834 2024-12-09T14:25:57,847 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:37545:37545)] 2024-12-09T14:25:57,847 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754342019 is not closed yet, will try archiving it next time 2024-12-09T14:25:57,848 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754357848 2024-12-09T14:25:57,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741841_1017 (size=4753) 2024-12-09T14:25:57,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741841_1017 (size=4753) 2024-12-09T14:26:02,851 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:26:02,851 WARN [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:26:02,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36625 {}] regionserver.HRegion(8855): Flush requested on da2e5e20807643dc49235f0603d84567 2024-12-09T14:26:02,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da2e5e20807643dc49235f0603d84567 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:26:02,857 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:26:02,857 WARN [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:26:04,852 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T14:26:07,854 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:26:07,854 WARN [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK], DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK]] 2024-12-09T14:26:07,854 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:07,854 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:07,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:07,855 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:07,855 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:07,855 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754352834 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754357848 2024-12-09T14:26:07,856 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37545:37545),(127.0.0.1/127.0.0.1:45321:45321)] 2024-12-09T14:26:07,856 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754352834 is not closed yet, will try archiving it next time 2024-12-09T14:26:07,856 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C36625%2C1733754298203:(num 1733754357848) roll requested 2024-12-09T14:26:07,857 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754367856 2024-12-09T14:26:07,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741842_1018 (size=1569) 2024-12-09T14:26:07,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741842_1018 (size=1569) 2024-12-09T14:26:07,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/af12296291d146ea9e9c2f7d4b55ed45 is 1080, key is row0015/info:/1733754339611/Put/seqid=0 2024-12-09T14:26:07,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741844_1020 (size=12509) 2024-12-09T14:26:07,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741844_1020 (size=12509) 2024-12-09T14:26:07,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/af12296291d146ea9e9c2f7d4b55ed45 2024-12-09T14:26:07,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/af12296291d146ea9e9c2f7d4b55ed45 as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/af12296291d146ea9e9c2f7d4b55ed45 2024-12-09T14:26:07,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/af12296291d146ea9e9c2f7d4b55ed45, entries=7, sequenceid=31, filesize=12.2 K 2024-12-09T14:26:12,867 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:26:12,868 WARN [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:26:12,900 INFO [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:26:12,900 WARN [FSHLog-0-hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d-prefix:f4e784dc7cb5,36625,1733754298203 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38529,DS-f9d69118-ef41-4918-9ef8-dfc47fbbb861,DISK], DatanodeInfoWithStorage[127.0.0.1:39635,DS-4266e32f-b99b-4431-bad2-6fde9b399897,DISK]] 2024-12-09T14:26:12,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da2e5e20807643dc49235f0603d84567 in 10050ms, sequenceid=31, compaction requested=true 2024-12-09T14:26:12,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da2e5e20807643dc49235f0603d84567: 2024-12-09T14:26:12,901 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-09T14:26:12,902 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:12,902 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,902 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63 because midkey is the same as first or last row 2024-12-09T14:26:12,902 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,906 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,906 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,907 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,907 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754357848 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754367856 2024-12-09T14:26:12,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da2e5e20807643dc49235f0603d84567:info, priority=-2147483648, current under 
compaction store size is 1 2024-12-09T14:26:12,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741843_1019 (size=438) 2024-12-09T14:26:12,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:26:12,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741843_1019 (size=438) 2024-12-09T14:26:12,916 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:26:12,918 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754330577 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs/f4e784dc7cb5%2C36625%2C1733754298203.1733754330577 2024-12-09T14:26:12,923 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754342019 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs/f4e784dc7cb5%2C36625%2C1733754298203.1733754342019 2024-12-09T14:26:12,925 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754352834 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs/f4e784dc7cb5%2C36625%2C1733754298203.1733754352834 2024-12-09T14:26:12,929 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754357848 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs/f4e784dc7cb5%2C36625%2C1733754298203.1733754357848 2024-12-09T14:26:12,929 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:26:12,930 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:37545:37545)] 2024-12-09T14:26:12,931 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C36625%2C1733754298203:(num 1733754372931) roll requested 2024-12-09T14:26:12,931 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754372931 2024-12-09T14:26:12,931 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.HStore(1541): da2e5e20807643dc49235f0603d84567/info is initiating minor compaction (all files) 2024-12-09T14:26:12,932 INFO [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of da2e5e20807643dc49235f0603d84567/info 
in TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:26:12,933 INFO [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63, hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/c56a716f19f14f7cb94ee05c580bcd5f, hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/af12296291d146ea9e9c2f7d4b55ed45] into tmpdir=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp, totalSize=36.6 K 2024-12-09T14:26:12,943 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] compactions.Compactor(225): Compacting cae6da6f77f541eb9b7c3feed61b9a63, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733754310532 2024-12-09T14:26:12,944 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] compactions.Compactor(225): Compacting c56a716f19f14f7cb94ee05c580bcd5f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733754324563 2024-12-09T14:26:12,948 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] compactions.Compactor(225): Compacting af12296291d146ea9e9c2f7d4b55ed45, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733754339611 2024-12-09T14:26:12,969 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,970 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,970 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,970 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,970 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:12,970 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754367856 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754372931 2024-12-09T14:26:12,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741845_1021 (size=93) 2024-12-09T14:26:12,978 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754367856 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs/f4e784dc7cb5%2C36625%2C1733754298203.1733754367856 2024-12-09T14:26:12,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741845_1021 (size=93) 2024-12-09T14:26:12,982 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:37545:37545)] 2024-12-09T14:26:12,982 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C36625%2C1733754298203.1733754372982 2024-12-09T14:26:13,023 INFO [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da2e5e20807643dc49235f0603d84567#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:26:13,025 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/1b0f7ca192a14168bff8ca52cfe36a86 is 1080, key is row0001/info:/1733754310532/Put/seqid=0 2024-12-09T14:26:13,057 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:13,058 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:13,059 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:13,059 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:13,060 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:13,060 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754372931 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754372982 2024-12-09T14:26:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741846_1022 (size=1258) 2024-12-09T14:26:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741846_1022 (size=1258) 2024-12-09T14:26:13,086 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45321:45321),(127.0.0.1/127.0.0.1:37545:37545)] 2024-12-09T14:26:13,087 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/WALs/f4e784dc7cb5,36625,1733754298203/f4e784dc7cb5%2C36625%2C1733754298203.1733754372931 is not closed yet, will try archiving it next time 2024-12-09T14:26:13,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741848_1024 (size=27710) 2024-12-09T14:26:13,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741848_1024 (size=27710) 2024-12-09T14:26:13,520 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/1b0f7ca192a14168bff8ca52cfe36a86 as 
hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/1b0f7ca192a14168bff8ca52cfe36a86 2024-12-09T14:26:13,541 INFO [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in da2e5e20807643dc49235f0603d84567/info of da2e5e20807643dc49235f0603d84567 into 1b0f7ca192a14168bff8ca52cfe36a86(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:26:13,541 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for da2e5e20807643dc49235f0603d84567: 2024-12-09T14:26:13,544 INFO [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567., storeName=da2e5e20807643dc49235f0603d84567/info, priority=13, startTime=1733754372903; duration=0sec 2024-12-09T14:26:13,544 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T14:26:13,544 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:13,544 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/1b0f7ca192a14168bff8ca52cfe36a86 because midkey is the same as first or last row 2024-12-09T14:26:13,545 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T14:26:13,545 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:13,545 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/1b0f7ca192a14168bff8ca52cfe36a86 because midkey is the same as first or last row 2024-12-09T14:26:13,545 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T14:26:13,545 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:13,545 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/1b0f7ca192a14168bff8ca52cfe36a86 because midkey is the same as first or last row 2024-12-09T14:26:13,545 DEBUG [RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:26:13,545 DEBUG 
[RS:0;f4e784dc7cb5:36625-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da2e5e20807643dc49235f0603d84567:info 2024-12-09T14:26:25,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36625 {}] regionserver.HRegion(8855): Flush requested on da2e5e20807643dc49235f0603d84567 2024-12-09T14:26:25,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da2e5e20807643dc49235f0603d84567 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:26:25,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/2b5ad6ad796746dea61856e502b04d36 is 1080, key is row0022/info:/1733754372987/Put/seqid=0 2024-12-09T14:26:25,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741849_1025 (size=12509) 2024-12-09T14:26:25,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741849_1025 (size=12509) 2024-12-09T14:26:25,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/2b5ad6ad796746dea61856e502b04d36 2024-12-09T14:26:25,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/2b5ad6ad796746dea61856e502b04d36 as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/2b5ad6ad796746dea61856e502b04d36 2024-12-09T14:26:25,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/2b5ad6ad796746dea61856e502b04d36, entries=7, sequenceid=42, filesize=12.2 K 2024-12-09T14:26:25,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da2e5e20807643dc49235f0603d84567 in 38ms, sequenceid=42, compaction requested=false 2024-12-09T14:26:25,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da2e5e20807643dc49235f0603d84567: 2024-12-09T14:26:25,051 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-09T14:26:25,051 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:25,051 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/1b0f7ca192a14168bff8ca52cfe36a86 because midkey is the same as first or last 
row 2024-12-09T14:26:26,441 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T14:26:30,856 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region da2e5e20807643dc49235f0603d84567, had cached 0 bytes from a total of 40219 2024-12-09T14:26:33,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T14:26:33,026 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T14:26:33,026 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:26:33,031 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:33,031 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:33,031 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T14:26:33,032 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T14:26:33,032 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=888866681, stopped=false 2024-12-09T14:26:33,032 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f4e784dc7cb5,32895,1733754297296 2024-12-09T14:26:33,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:33,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:33,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:33,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:33,034 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:26:33,035 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
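Returning to the split-policy lines logged before the shutdown started ("Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K", followed by "cannot split ... because midkey is the same as first or last row"): the size test passes, but the split is still vetoed because the candidate split point does not fall strictly inside the store file's key range. A small stand-alone sketch of that two-stage decision, using the sizes reported in the log and hypothetical key values and names:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

/**
 * Two-stage split decision sketched from the log: the region is large enough,
 * but a split is refused when the mid key equals the first or last key.
 */
public final class SplitDecisionSketch {

  /** Size test: split only when the store's total size exceeds the configured check size. */
  static boolean bigEnough(long sumSizeBytes, long sizeToCheckBytes) {
    return sumSizeBytes > sizeToCheckBytes;
  }

  /** Split-point test: the mid key must differ from both the first and the last key. */
  static boolean hasUsableSplitPoint(byte[] firstKey, byte[] midKey, byte[] lastKey) {
    return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
  }

  public static void main(String[] args) {
    long sumSize = 27_710;      // ~27.1 K, the size of the compacted store file in the log
    long sizeToCheck = 16_384;  // 16.0 K

    // Stand-in keys: in the log the reported mid key collapsed onto the first/last
    // row of the file, which is exactly the case the split policy refuses.
    byte[] first = "row0001".getBytes(StandardCharsets.UTF_8);
    byte[] mid = "row0001".getBytes(StandardCharsets.UTF_8);
    byte[] last = "row0021".getBytes(StandardCharsets.UTF_8);

    if (bigEnough(sumSize, sizeToCheck) && hasUsableSplitPoint(first, mid, last)) {
      System.out.println("split");
    } else {
      System.out.println("cannot split: midkey is the same as first or last row");
    }
  }
}
```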
2024-12-09T14:26:33,035 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:33,035 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:33,035 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:26:33,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:33,035 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,36625,1733754298203' ***** 2024-12-09T14:26:33,035 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:26:33,036 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:26:33,036 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:26:33,036 INFO [RS:0;f4e784dc7cb5:36625 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:26:33,036 INFO [RS:0;f4e784dc7cb5:36625 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:26:33,037 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(3091): Received CLOSE for da2e5e20807643dc49235f0603d84567 2024-12-09T14:26:33,037 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,36625,1733754298203 2024-12-09T14:26:33,037 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:26:33,037 INFO [RS:0;f4e784dc7cb5:36625 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f4e784dc7cb5:36625. 
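The call stacks logged above trace the connection close back to AbstractTestLogRolling.tearDown() invoking HBaseTestingUtil.shutdownMiniCluster(). A minimal sketch of that tear-down path, assuming only what the stack trace shows; the class name and the TEST_UTIL field name below are illustrative, not taken from the log:

```java
// Minimal sketch of the tear-down path visible in the call stacks above
// (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster).
// Class and field names are illustrative, not from the log.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  // Assumed to have been used earlier in the test to start the mini cluster.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the mini HBase cluster (master + region server) and then the
    // mini DFS/ZooKeeper clusters, producing the shutdown sequence recorded
    // in the surrounding log entries.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```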
2024-12-09T14:26:33,037 DEBUG [RS:0;f4e784dc7cb5:36625 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:26:33,038 DEBUG [RS:0;f4e784dc7cb5:36625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:33,038 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing da2e5e20807643dc49235f0603d84567, disabling compactions & flushes 2024-12-09T14:26:33,038 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T14:26:33,038 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:26:33,038 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:26:33,038 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T14:26:33,038 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:26:33,038 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. after waiting 0 ms 2024-12-09T14:26:33,038 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T14:26:33,038 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 
2024-12-09T14:26:33,038 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing da2e5e20807643dc49235f0603d84567 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-09T14:26:33,038 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T14:26:33,038 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:26:33,038 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1325): Online Regions={da2e5e20807643dc49235f0603d84567=TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T14:26:33,039 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:26:33,039 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:26:33,039 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:26:33,039 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:26:33,039 DEBUG [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, da2e5e20807643dc49235f0603d84567 2024-12-09T14:26:33,039 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-09T14:26:33,044 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/589e4414faa94429af4d1340e6914755 is 1080, key is row0029/info:/1733754387016/Put/seqid=0 2024-12-09T14:26:33,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741850_1026 (size=8193) 2024-12-09T14:26:33,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741850_1026 (size=8193) 2024-12-09T14:26:33,057 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/589e4414faa94429af4d1340e6914755 2024-12-09T14:26:33,064 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/info/e0cc2b17c8fe4368b01bdce3ddd0b37e is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567./info:regioninfo/1733754300881/Put/seqid=0 2024-12-09T14:26:33,067 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/.tmp/info/589e4414faa94429af4d1340e6914755 as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/589e4414faa94429af4d1340e6914755 2024-12-09T14:26:33,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741851_1027 (size=7016) 2024-12-09T14:26:33,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741851_1027 (size=7016) 2024-12-09T14:26:33,071 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/info/e0cc2b17c8fe4368b01bdce3ddd0b37e 2024-12-09T14:26:33,076 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/589e4414faa94429af4d1340e6914755, entries=3, sequenceid=48, filesize=8.0 K 2024-12-09T14:26:33,077 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for da2e5e20807643dc49235f0603d84567 in 39ms, sequenceid=48, compaction requested=true 2024-12-09T14:26:33,078 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63, hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/c56a716f19f14f7cb94ee05c580bcd5f, hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/af12296291d146ea9e9c2f7d4b55ed45] to archive 2024-12-09T14:26:33,081 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T14:26:33,085 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/cae6da6f77f541eb9b7c3feed61b9a63 2024-12-09T14:26:33,087 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/c56a716f19f14f7cb94ee05c580bcd5f to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/c56a716f19f14f7cb94ee05c580bcd5f 2024-12-09T14:26:33,089 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/af12296291d146ea9e9c2f7d4b55ed45 to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/info/af12296291d146ea9e9c2f7d4b55ed45 2024-12-09T14:26:33,101 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/ns/6663b03231bf4a838595a009c7854f09 is 43, key is default/ns:d/1733754300110/Put/seqid=0 2024-12-09T14:26:33,103 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f4e784dc7cb5:32895 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-09T14:26:33,104 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cae6da6f77f541eb9b7c3feed61b9a63=12509, c56a716f19f14f7cb94ee05c580bcd5f=12509, af12296291d146ea9e9c2f7d4b55ed45=12509] 2024-12-09T14:26:33,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741852_1028 (size=5153) 2024-12-09T14:26:33,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741852_1028 (size=5153) 2024-12-09T14:26:33,109 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/ns/6663b03231bf4a838595a009c7854f09 2024-12-09T14:26:33,111 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/default/TestLogRolling-testSlowSyncLogRolling/da2e5e20807643dc49235f0603d84567/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-09T14:26:33,113 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 2024-12-09T14:26:33,113 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for da2e5e20807643dc49235f0603d84567: Waiting for close lock at 1733754393037Running coprocessor pre-close hooks at 1733754393038 (+1 ms)Disabling compacts and flushes for region at 1733754393038Disabling writes for close at 1733754393038Obtaining lock to block concurrent updates at 1733754393038Preparing flush snapshotting stores in da2e5e20807643dc49235f0603d84567 at 1733754393038Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733754393039 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. at 1733754393040 (+1 ms)Flushing da2e5e20807643dc49235f0603d84567/info: creating writer at 1733754393040Flushing da2e5e20807643dc49235f0603d84567/info: appending metadata at 1733754393044 (+4 ms)Flushing da2e5e20807643dc49235f0603d84567/info: closing flushed file at 1733754393044Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@831b558: reopening flushed file at 1733754393066 (+22 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for da2e5e20807643dc49235f0603d84567 in 39ms, sequenceid=48, compaction requested=true at 1733754393078 (+12 ms)Writing region close event to WAL at 1733754393105 (+27 ms)Running coprocessor post-close hooks at 1733754393111 (+6 ms)Closed at 1733754393113 (+2 ms) 2024-12-09T14:26:33,114 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733754300422.da2e5e20807643dc49235f0603d84567. 
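The HFileArchiver lines above show each compacted store file being moved from the data directory to the same relative path under archive/. A small illustrative helper that captures just that path mapping; it is not the HBase HFileArchiver implementation:

```java
// Illustrative only: mirrors the data/ -> archive/data/ move shown in the
// HFileArchiver log lines above; not the actual HBase archiving code.
public final class ArchivePathSketch {
  static String toArchivePath(String rootDir, String storeFilePath) {
    String dataPrefix = rootDir + "/data/";
    if (!storeFilePath.startsWith(dataPrefix)) {
      throw new IllegalArgumentException("Not under " + dataPrefix + ": " + storeFilePath);
    }
    // e.g. <root>/data/default/<table>/<region>/info/<hfile>
    //   -> <root>/archive/data/default/<table>/<region>/info/<hfile>
    return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
  }
}
```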
2024-12-09T14:26:33,134 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/table/3554cffa9d644ed48360d875e8444eb0 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733754300897/Put/seqid=0 2024-12-09T14:26:33,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741853_1029 (size=5396) 2024-12-09T14:26:33,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741853_1029 (size=5396) 2024-12-09T14:26:33,141 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/table/3554cffa9d644ed48360d875e8444eb0 2024-12-09T14:26:33,149 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/info/e0cc2b17c8fe4368b01bdce3ddd0b37e as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/info/e0cc2b17c8fe4368b01bdce3ddd0b37e 2024-12-09T14:26:33,157 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/info/e0cc2b17c8fe4368b01bdce3ddd0b37e, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T14:26:33,159 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/ns/6663b03231bf4a838595a009c7854f09 as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/ns/6663b03231bf4a838595a009c7854f09 2024-12-09T14:26:33,167 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/ns/6663b03231bf4a838595a009c7854f09, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T14:26:33,168 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/.tmp/table/3554cffa9d644ed48360d875e8444eb0 as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/table/3554cffa9d644ed48360d875e8444eb0 2024-12-09T14:26:33,176 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/table/3554cffa9d644ed48360d875e8444eb0, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T14:26:33,177 INFO 
[RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-12-09T14:26:33,183 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T14:26:33,184 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:26:33,185 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:26:33,185 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754393038Running coprocessor pre-close hooks at 1733754393038Disabling compacts and flushes for region at 1733754393038Disabling writes for close at 1733754393039 (+1 ms)Obtaining lock to block concurrent updates at 1733754393039Preparing flush snapshotting stores in 1588230740 at 1733754393039Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733754393040 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733754393040Flushing 1588230740/info: creating writer at 1733754393041 (+1 ms)Flushing 1588230740/info: appending metadata at 1733754393063 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733754393063Flushing 1588230740/ns: creating writer at 1733754393079 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733754393100 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1733754393100Flushing 1588230740/table: creating writer at 1733754393117 (+17 ms)Flushing 1588230740/table: appending metadata at 1733754393133 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733754393133Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7748ad06: reopening flushed file at 1733754393148 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@571f47dd: reopening flushed file at 1733754393158 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6747e5bf: reopening flushed file at 1733754393167 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1733754393177 (+10 ms)Writing region close event to WAL at 1733754393179 (+2 ms)Running coprocessor post-close hooks at 1733754393184 (+5 ms)Closed at 1733754393185 (+1 ms) 2024-12-09T14:26:33,185 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T14:26:33,239 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,36625,1733754298203; all regions closed. 
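The "Committing ... as ..." lines above show flush output first written under the region's .tmp directory and then moved into the column-family directory. A hedged sketch of that rename target, illustrating only the path convention visible in the log:

```java
// Illustrative only: the HRegionFileSystem "Committing <region>/.tmp/<family>/<file>
// as <region>/<family>/<file>" lines above amount to dropping the ".tmp" segment.
public final class FlushCommitPathSketch {
  static String committedPath(String tmpStoreFilePath) {
    int idx = tmpStoreFilePath.indexOf("/.tmp/");
    if (idx < 0) {
      throw new IllegalArgumentException("Not a .tmp store file path: " + tmpStoreFilePath);
    }
    return tmpStoreFilePath.substring(0, idx) + "/"
        + tmpStoreFilePath.substring(idx + "/.tmp/".length());
  }
}
```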
2024-12-09T14:26:33,241 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,241 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,241 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,241 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,241 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741834_1010 (size=3066) 2024-12-09T14:26:33,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741834_1010 (size=3066) 2024-12-09T14:26:33,423 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T14:26:33,423 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T14:26:33,429 INFO [regionserver/f4e784dc7cb5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:26:33,650 DEBUG [RS:0;f4e784dc7cb5:36625 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs 2024-12-09T14:26:33,650 INFO [RS:0;f4e784dc7cb5:36625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C36625%2C1733754298203.meta:.meta(num 1733754299932) 2024-12-09T14:26:33,650 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741847_1023 (size=12695) 2024-12-09T14:26:33,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741847_1023 (size=12695) 2024-12-09T14:26:33,659 DEBUG [RS:0;f4e784dc7cb5:36625 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/oldWALs 2024-12-09T14:26:33,659 INFO [RS:0;f4e784dc7cb5:36625 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C36625%2C1733754298203:(num 1733754372982) 2024-12-09T14:26:33,659 DEBUG [RS:0;f4e784dc7cb5:36625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:33,659 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:26:33,659 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:26:33,660 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T14:26:33,660 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:26:33,660 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T14:26:33,660 INFO [RS:0;f4e784dc7cb5:36625 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36625 2024-12-09T14:26:33,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,36625,1733754298203 2024-12-09T14:26:33,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:26:33,665 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:26:33,668 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,36625,1733754298203] 2024-12-09T14:26:33,671 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,36625,1733754298203 already deleted, retry=false 2024-12-09T14:26:33,671 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,36625,1733754298203 expired; onlineServers=0 2024-12-09T14:26:33,671 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f4e784dc7cb5,32895,1733754297296' ***** 2024-12-09T14:26:33,671 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T14:26:33,672 INFO [M:0;f4e784dc7cb5:32895 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:26:33,672 INFO [M:0;f4e784dc7cb5:32895 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:26:33,672 DEBUG [M:0;f4e784dc7cb5:32895 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T14:26:33,672 DEBUG [M:0;f4e784dc7cb5:32895 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T14:26:33,672 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
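The NodeDeleted event above removes the ephemeral znode /hbase/rs/f4e784dc7cb5,36625,1733754298203, which is keyed by the server name rendered as "host,port,startcode"; once it is gone and the cluster-shutdown flag is set, onlineServers drops to 0 and the master stops itself. A tiny sketch of that znode path format, built from plain strings and grounded only in the path shown in the log:

```java
// Illustrative only: reconstructs the region-server ephemeral znode path shown
// above from hostname, RPC port and start code. Values come from the log; the
// helper itself is not HBase code.
public final class RsZNodeSketch {
  static String rsZNode(String baseZNode, String host, int port, long startCode) {
    // ServerName appears as "host,port,startcode" in HBase logs and znodes.
    return baseZNode + "/rs/" + host + "," + port + "," + startCode;
  }

  public static void main(String[] args) {
    // -> /hbase/rs/f4e784dc7cb5,36625,1733754298203 (matches the NodeDeleted event above)
    System.out.println(rsZNode("/hbase", "f4e784dc7cb5", 36625, 1733754298203L));
  }
}
```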
2024-12-09T14:26:33,672 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754299213 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754299213,5,FailOnTimeoutGroup] 2024-12-09T14:26:33,672 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754299214 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754299214,5,FailOnTimeoutGroup] 2024-12-09T14:26:33,672 INFO [M:0;f4e784dc7cb5:32895 {}] hbase.ChoreService(370): Chore service for: master/f4e784dc7cb5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T14:26:33,672 INFO [M:0;f4e784dc7cb5:32895 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:26:33,673 DEBUG [M:0;f4e784dc7cb5:32895 {}] master.HMaster(1795): Stopping service threads 2024-12-09T14:26:33,673 INFO [M:0;f4e784dc7cb5:32895 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T14:26:33,673 INFO [M:0;f4e784dc7cb5:32895 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:26:33,673 INFO [M:0;f4e784dc7cb5:32895 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T14:26:33,673 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T14:26:33,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T14:26:33,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:33,675 DEBUG [M:0;f4e784dc7cb5:32895 {}] zookeeper.ZKUtil(347): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T14:26:33,675 WARN [M:0;f4e784dc7cb5:32895 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T14:26:33,676 INFO [M:0;f4e784dc7cb5:32895 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/.lastflushedseqids 2024-12-09T14:26:33,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741854_1030 (size=130) 2024-12-09T14:26:33,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741854_1030 (size=130) 2024-12-09T14:26:33,692 INFO [M:0;f4e784dc7cb5:32895 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T14:26:33,692 INFO [M:0;f4e784dc7cb5:32895 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T14:26:33,692 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:26:33,692 INFO [M:0;f4e784dc7cb5:32895 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:33,692 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:33,692 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:26:33,692 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:33,693 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-09T14:26:33,711 DEBUG [M:0;f4e784dc7cb5:32895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca4c4eb3b021404891ca75ab5840ebee is 82, key is hbase:meta,,1/info:regioninfo/1733754300021/Put/seqid=0 2024-12-09T14:26:33,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741855_1031 (size=5672) 2024-12-09T14:26:33,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741855_1031 (size=5672) 2024-12-09T14:26:33,718 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca4c4eb3b021404891ca75ab5840ebee 2024-12-09T14:26:33,741 DEBUG [M:0;f4e784dc7cb5:32895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8add01d17efb4e5ca5978e6a809a444e is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733754300904/Put/seqid=0 2024-12-09T14:26:33,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741856_1032 (size=6247) 2024-12-09T14:26:33,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741856_1032 (size=6247) 2024-12-09T14:26:33,748 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8add01d17efb4e5ca5978e6a809a444e 2024-12-09T14:26:33,755 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8add01d17efb4e5ca5978e6a809a444e 2024-12-09T14:26:33,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:33,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36625-0x1012b92a8d30001, quorum=127.0.0.1:62538, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:33,769 INFO [RS:0;f4e784dc7cb5:36625 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:26:33,769 INFO [RS:0;f4e784dc7cb5:36625 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,36625,1733754298203; zookeeper connection closed. 2024-12-09T14:26:33,769 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@184b911e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@184b911e 2024-12-09T14:26:33,770 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T14:26:33,772 DEBUG [M:0;f4e784dc7cb5:32895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/66d146b3370b4590bcebddd280be624b is 69, key is f4e784dc7cb5,36625,1733754298203/rs:state/1733754299354/Put/seqid=0 2024-12-09T14:26:33,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741857_1033 (size=5156) 2024-12-09T14:26:33,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741857_1033 (size=5156) 2024-12-09T14:26:33,780 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/66d146b3370b4590bcebddd280be624b 2024-12-09T14:26:33,805 DEBUG [M:0;f4e784dc7cb5:32895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db553aee08974bed98f2f9a243d3da62 is 52, key is load_balancer_on/state:d/1733754300395/Put/seqid=0 2024-12-09T14:26:33,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741858_1034 (size=5056) 2024-12-09T14:26:33,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741858_1034 (size=5056) 2024-12-09T14:26:33,813 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db553aee08974bed98f2f9a243d3da62 2024-12-09T14:26:33,823 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca4c4eb3b021404891ca75ab5840ebee as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ca4c4eb3b021404891ca75ab5840ebee 2024-12-09T14:26:33,830 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ca4c4eb3b021404891ca75ab5840ebee, entries=8, sequenceid=59, filesize=5.5 K 2024-12-09T14:26:33,831 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8add01d17efb4e5ca5978e6a809a444e as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8add01d17efb4e5ca5978e6a809a444e 2024-12-09T14:26:33,838 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8add01d17efb4e5ca5978e6a809a444e 2024-12-09T14:26:33,838 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8add01d17efb4e5ca5978e6a809a444e, entries=6, sequenceid=59, filesize=6.1 K 2024-12-09T14:26:33,840 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/66d146b3370b4590bcebddd280be624b as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/66d146b3370b4590bcebddd280be624b 2024-12-09T14:26:33,847 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/66d146b3370b4590bcebddd280be624b, entries=1, sequenceid=59, filesize=5.0 K 2024-12-09T14:26:33,849 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/db553aee08974bed98f2f9a243d3da62 as hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db553aee08974bed98f2f9a243d3da62 2024-12-09T14:26:33,858 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/db553aee08974bed98f2f9a243d3da62, entries=1, sequenceid=59, filesize=4.9 K 2024-12-09T14:26:33,859 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=59, compaction requested=false 2024-12-09T14:26:33,861 INFO [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T14:26:33,862 DEBUG [M:0;f4e784dc7cb5:32895 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754393692Disabling compacts and flushes for region at 1733754393692Disabling writes for close at 1733754393692Obtaining lock to block concurrent updates at 1733754393693 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733754393693Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733754393693Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733754393694 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733754393694Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733754393711 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733754393711Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733754393725 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733754393740 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733754393740Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733754393755 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733754393772 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733754393772Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733754393787 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733754393804 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733754393804Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3609ff7c: reopening flushed file at 1733754393821 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61795b8c: reopening flushed file at 1733754393830 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cd5a074: reopening flushed file at 1733754393838 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ff74931: reopening flushed file at 1733754393848 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=59, compaction requested=false at 1733754393859 (+11 ms)Writing region close event to WAL at 1733754393861 (+2 ms)Closed at 1733754393861 2024-12-09T14:26:33,863 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,863 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,863 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,864 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,864 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:33,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38529 is added to blk_1073741830_1006 (size=27973) 2024-12-09T14:26:33,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39635 is added to blk_1073741830_1006 (size=27973) 2024-12-09T14:26:33,868 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
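The "Region close journal" entries above (here for master:store, earlier for the test region and hbase:meta) pack every step of the close into one line as "<step> at <epoch-millis>", optionally followed by a "(+N ms)" delta. A small illustrative parser, not part of HBase, that splits such a journal back into readable steps:

```java
// Illustrative only: splits an HBase "Region close journal" line (as logged above)
// into its individual "<step> at <epoch-millis> (+N ms)" records.
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalSketch {
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    // First few steps of the master:store close journal above, concatenated as logged.
    String journal = "Waiting for close lock at 1733754393692"
        + "Disabling compacts and flushes for region at 1733754393692"
        + "Disabling writes for close at 1733754393692"
        + "Obtaining lock to block concurrent updates at 1733754393693 (+1 ms)";
    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String delta = (m.group(3) == null) ? "+0 ms" : "+" + m.group(3) + " ms";
      System.out.printf("%s  %-45s %s%n", m.group(2), m.group(1).trim(), delta);
    }
  }
}
```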
2024-12-09T14:26:33,868 INFO [M:0;f4e784dc7cb5:32895 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T14:26:33,868 INFO [M:0;f4e784dc7cb5:32895 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:32895 2024-12-09T14:26:33,868 INFO [M:0;f4e784dc7cb5:32895 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:26:33,971 INFO [M:0;f4e784dc7cb5:32895 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:26:33,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:33,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32895-0x1012b92a8d30000, quorum=127.0.0.1:62538, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:33,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:33,981 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:33,981 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:33,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:33,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:33,986 WARN [BP-334055039-172.17.0.3-1733754294478 heartbeating to localhost/127.0.0.1:44241 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:33,986 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:26:33,986 WARN [BP-334055039-172.17.0.3-1733754294478 heartbeating to localhost/127.0.0.1:44241 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-334055039-172.17.0.3-1733754294478 (Datanode Uuid 640d0cff-c6b1-425c-af40-f6d4ccb7e7fb) service to localhost/127.0.0.1:44241 2024-12-09T14:26:33,986 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:33,988 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data3/current/BP-334055039-172.17.0.3-1733754294478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:33,988 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data4/current/BP-334055039-172.17.0.3-1733754294478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:33,989 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:33,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:33,992 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:33,992 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:33,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:33,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:33,994 WARN [BP-334055039-172.17.0.3-1733754294478 heartbeating to localhost/127.0.0.1:44241 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:33,994 WARN [BP-334055039-172.17.0.3-1733754294478 heartbeating to localhost/127.0.0.1:44241 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-334055039-172.17.0.3-1733754294478 (Datanode Uuid a5b89841-b0d0-4cbe-9121-d1a427f33509) service to localhost/127.0.0.1:44241 2024-12-09T14:26:33,994 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:26:33,994 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:33,994 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data1/current/BP-334055039-172.17.0.3-1733754294478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:33,994 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/cluster_6a57e60b-f582-fbe8-f54a-e7c669fe7469/data/data2/current/BP-334055039-172.17.0.3-1733754294478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:33,995 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:34,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:26:34,005 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:34,005 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:34,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:34,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:34,015 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T14:26:34,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T14:26:34,061 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44241 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44241 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/f4e784dc7cb5:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44241 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44241 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44241 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/f4e784dc7cb5:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@efc635d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44241 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44241 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/f4e784dc7cb5:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44241 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=163 (was 476), ProcessCount=11 (was 11), AvailableMemoryMB=5710 (was 6454) 2024-12-09T14:26:34,068 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=163, ProcessCount=11, AvailableMemoryMB=5709 2024-12-09T14:26:34,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T14:26:34,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.log.dir so I do NOT create it in target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156 2024-12-09T14:26:34,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1bedb25-871c-93ed-52c1-5d334a4de394/hadoop.tmp.dir so I do NOT create it in target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156 2024-12-09T14:26:34,069 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782, deleteOnExit=true 2024-12-09T14:26:34,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T14:26:34,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/test.cache.data in system properties and HBase conf 2024-12-09T14:26:34,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T14:26:34,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir in system properties and HBase conf 2024-12-09T14:26:34,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T14:26:34,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T14:26:34,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T14:26:34,071 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T14:26:34,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:26:34,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:26:34,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T14:26:34,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/nfs.dump.dir in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/java.io.tmpdir in system properties and HBase conf 2024-12-09T14:26:34,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:26:34,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T14:26:34,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T14:26:34,095 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:26:34,175 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:34,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:34,182 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:34,182 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:34,182 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:26:34,182 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:34,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e979f8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:34,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46d26a79{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:34,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26a9efc0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/java.io.tmpdir/jetty-localhost-44719-hadoop-hdfs-3_4_1-tests_jar-_-any-7023931888194390589/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:26:34,303 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45e3157d{HTTP/1.1, (http/1.1)}{localhost:44719} 2024-12-09T14:26:34,303 INFO [Time-limited test {}] server.Server(415): Started @102161ms 2024-12-09T14:26:34,318 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:26:34,392 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:34,397 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:34,398 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:34,398 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:34,399 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:26:34,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48bb784e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:34,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a0f3c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:34,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@641eaf99{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/java.io.tmpdir/jetty-localhost-45663-hadoop-hdfs-3_4_1-tests_jar-_-any-12965113492082590661/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:34,518 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@276f8783{HTTP/1.1, (http/1.1)}{localhost:45663} 2024-12-09T14:26:34,518 INFO [Time-limited test {}] server.Server(415): Started @102376ms 2024-12-09T14:26:34,520 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:26:34,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:34,563 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:34,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:34,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:34,564 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:26:34,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f11c100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:34,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@194f043a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:34,652 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data1/current/BP-946150497-172.17.0.3-1733754394114/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:34,652 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data2/current/BP-946150497-172.17.0.3-1733754394114/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:34,671 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:26:34,674 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd73a22a2d0ab168b with lease ID 0xbc9e91cd7e6ad3a4: Processing first storage report for DS-09ac13d5-c2c0-4ca4-a64b-abb500dfce70 from datanode DatanodeRegistration(127.0.0.1:43407, datanodeUuid=1a7ccbdd-601b-4baf-873b-2cd079b3d084, infoPort=43969, infoSecurePort=0, ipcPort=46837, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114) 2024-12-09T14:26:34,674 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd73a22a2d0ab168b with lease ID 0xbc9e91cd7e6ad3a4: from storage DS-09ac13d5-c2c0-4ca4-a64b-abb500dfce70 node DatanodeRegistration(127.0.0.1:43407, datanodeUuid=1a7ccbdd-601b-4baf-873b-2cd079b3d084, infoPort=43969, infoSecurePort=0, ipcPort=46837, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T14:26:34,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd73a22a2d0ab168b with lease ID 0xbc9e91cd7e6ad3a4: Processing first storage report for DS-6e7c01f5-224e-4766-bcae-8751dd77b4ec from datanode DatanodeRegistration(127.0.0.1:43407, datanodeUuid=1a7ccbdd-601b-4baf-873b-2cd079b3d084, infoPort=43969, infoSecurePort=0, ipcPort=46837, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114) 2024-12-09T14:26:34,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd73a22a2d0ab168b with lease ID 0xbc9e91cd7e6ad3a4: from storage DS-6e7c01f5-224e-4766-bcae-8751dd77b4ec node DatanodeRegistration(127.0.0.1:43407, datanodeUuid=1a7ccbdd-601b-4baf-873b-2cd079b3d084, infoPort=43969, infoSecurePort=0, ipcPort=46837, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:34,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b95e72c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/java.io.tmpdir/jetty-localhost-35363-hadoop-hdfs-3_4_1-tests_jar-_-any-2527044099564182955/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:34,687 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@376d199b{HTTP/1.1, (http/1.1)}{localhost:35363} 2024-12-09T14:26:34,687 INFO [Time-limited test {}] server.Server(415): Started @102545ms 2024-12-09T14:26:34,689 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
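The lines above show HBaseTestingUtil pointing every mini-cluster directory (mapreduce, YARN, DFS, tmp) at the generated test-data directory and mirroring each value into both Java system properties and the HBase Configuration, after which the embedded Jetty namenode/datanode web apps come up. Below is a minimal sketch of that mirroring pattern; the base path and key are placeholders, and the real helper inside HBaseTestingUtil is not reproduced here, only the standard Configuration/System calls it logs about.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MiniClusterConfSketch {
  public static void main(String[] args) {
    // Base directory for test data; the real run uses a generated
    // target/test-data/<uuid> path, this one is a placeholder.
    String baseDir = "/tmp/hbase-test-data";

    Configuration conf = HBaseConfiguration.create();

    // Mirror a directory setting into both places, the way the
    // "Setting <key> to <path> in system properties and HBase conf"
    // log lines describe.
    String key = "mapreduce.cluster.local.dir";
    String value = baseDir + "/" + key;
    System.setProperty(key, value);
    conf.set(key, value);
  }
}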
2024-12-09T14:26:34,788 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data4/current/BP-946150497-172.17.0.3-1733754394114/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:34,788 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data3/current/BP-946150497-172.17.0.3-1733754394114/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:34,812 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:26:34,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xda90799ce0e48ad8 with lease ID 0xbc9e91cd7e6ad3a5: Processing first storage report for DS-563dceb4-2d85-466a-802e-bb4bb81f476c from datanode DatanodeRegistration(127.0.0.1:44267, datanodeUuid=acace847-30e6-47cb-89b5-31e661391b6e, infoPort=35751, infoSecurePort=0, ipcPort=44245, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114) 2024-12-09T14:26:34,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda90799ce0e48ad8 with lease ID 0xbc9e91cd7e6ad3a5: from storage DS-563dceb4-2d85-466a-802e-bb4bb81f476c node DatanodeRegistration(127.0.0.1:44267, datanodeUuid=acace847-30e6-47cb-89b5-31e661391b6e, infoPort=35751, infoSecurePort=0, ipcPort=44245, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:34,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xda90799ce0e48ad8 with lease ID 0xbc9e91cd7e6ad3a5: Processing first storage report for DS-e4422d83-6934-4e1e-a147-91fee958fa70 from datanode DatanodeRegistration(127.0.0.1:44267, datanodeUuid=acace847-30e6-47cb-89b5-31e661391b6e, infoPort=35751, infoSecurePort=0, ipcPort=44245, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114) 2024-12-09T14:26:34,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda90799ce0e48ad8 with lease ID 0xbc9e91cd7e6ad3a5: from storage DS-e4422d83-6934-4e1e-a147-91fee958fa70 node DatanodeRegistration(127.0.0.1:44267, datanodeUuid=acace847-30e6-47cb-89b5-31e661391b6e, infoPort=35751, infoSecurePort=0, ipcPort=44245, storageInfo=lv=-57;cid=testClusterID;nsid=1646102668;c=1733754394114), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:34,818 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156 2024-12-09T14:26:34,822 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/zookeeper_0, clientPort=63191, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T14:26:34,823 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63191 2024-12-09T14:26:34,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:34,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:34,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:26:34,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:26:34,837 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b with version=8 2024-12-09T14:26:34,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase-staging 2024-12-09T14:26:34,839 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:26:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:34,839 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:26:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:34,839 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:26:34,839 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T14:26:34,840 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:26:34,840 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35663 2024-12-09T14:26:34,841 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35663 connecting to ZooKeeper ensemble=127.0.0.1:63191 2024-12-09T14:26:34,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:356630x0, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:26:34,850 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35663-0x1012b9428e50000 connected 2024-12-09T14:26:34,866 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:34,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:34,871 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:34,871 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b, hbase.cluster.distributed=false 2024-12-09T14:26:34,873 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:26:34,873 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35663 2024-12-09T14:26:34,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35663 2024-12-09T14:26:34,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35663 2024-12-09T14:26:34,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35663 2024-12-09T14:26:34,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35663 2024-12-09T14:26:34,895 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:26:34,895 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:34,895 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:34,895 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:26:34,895 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:34,895 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:26:34,895 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:26:34,895 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:26:34,896 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40785 2024-12-09T14:26:34,897 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40785 connecting to ZooKeeper ensemble=127.0.0.1:63191 2024-12-09T14:26:34,898 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:34,900 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:34,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407850x0, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:26:34,905 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40785-0x1012b9428e50001 connected 2024-12-09T14:26:34,906 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:34,906 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T14:26:34,907 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T14:26:34,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T14:26:34,909 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:26:34,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40785 2024-12-09T14:26:34,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40785 2024-12-09T14:26:34,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40785 2024-12-09T14:26:34,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40785 2024-12-09T14:26:34,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40785 
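At this point the master RPC server (port 35663) and the region server RPC server (port 40785) have bound, and each process has opened a ZooKeeper session against the mini ensemble at 127.0.0.1:63191, logging a SyncConnected event. The sketch below shows the equivalent connection handshake with the plain ZooKeeper client; the quorum string is taken from the log, the timeout is arbitrary, and HBase's RecoverableZooKeeper wrapper adds retry behaviour that is not shown.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkConnectSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log above; any live ensemble works the same way.
    String quorum = "127.0.0.1:63191";
    CountDownLatch connected = new CountDownLatch(1);

    // The watcher sees the same SyncConnected event the ZKWatcher lines report.
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, (WatchedEvent event) -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });

    connected.await();
    System.out.println("Connected, session id 0x" + Long.toHexString(zk.getSessionId()));
    zk.close();
  }
}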
2024-12-09T14:26:34,926 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f4e784dc7cb5:35663 2024-12-09T14:26:34,926 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:34,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:34,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:34,929 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:34,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T14:26:34,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:34,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:34,933 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T14:26:34,933 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f4e784dc7cb5,35663,1733754394839 from backup master directory 2024-12-09T14:26:34,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:34,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:34,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:34,935 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
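These events trace master registration through znodes: an entry appears under /hbase/backup-masters, watchers fire on /hbase/master, and the backup entry is deleted once the process wins the active role. The fragment below is an illustrative ephemeral-znode registration pattern under the same paths, not HBase's ActiveMasterManager code; it assumes an already-connected ZooKeeper client and existing parent znodes.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  // zk is an already-connected client; serverName mimics the
  // host,port,startcode form seen in the log.
  static void register(ZooKeeper zk, String serverName) throws Exception {
    String backupPath = "/hbase/backup-masters/" + serverName;

    // Ephemeral: the znode disappears automatically if this session dies,
    // which is what lets other masters notice a crashed peer.
    zk.create(backupPath, serverName.getBytes(StandardCharsets.UTF_8),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Once promoted to active master, the backup entry is removed, matching
    // the "Deleting ZNode ... from backup master directory" line above.
    zk.delete(backupPath, -1);
  }
}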
2024-12-09T14:26:34,936 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:34,942 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/hbase.id] with ID: b46282b3-80c6-49d5-bc31-e622524eee19 2024-12-09T14:26:34,942 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/.tmp/hbase.id 2024-12-09T14:26:34,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:26:34,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:26:34,954 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/.tmp/hbase.id]:[hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/hbase.id] 2024-12-09T14:26:34,971 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:34,971 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T14:26:34,973 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
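The cluster ID file is written first to .tmp/hbase.id and then moved to its final hbase.id location, so readers never observe a half-written file. A small sketch of that write-then-rename idiom with the Hadoop FileSystem API follows; the paths are placeholders rather than the hdfs://localhost:37339 root directory used in this run.

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS would point at the mini cluster's namenode in the run
    // above; with a default Configuration this falls back to the local FS.
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/tmp/hbase-rootdir");
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Write to a temporary location first ...
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.writeUTF(UUID.randomUUID().toString());
    }
    // ... then move it into place so readers never see a partial file.
    fs.rename(tmpId, finalId);
  }
}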
2024-12-09T14:26:34,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:34,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:34,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:26:34,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:26:34,994 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:26:34,995 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T14:26:34,995 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:26:35,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:26:35,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:26:35,007 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store 2024-12-09T14:26:35,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:26:35,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:26:35,017 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:35,017 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:26:35,017 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:35,017 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:35,017 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:26:35,017 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:35,017 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
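The descriptor logged here for the local 'master:store' region carries four column families (info, proc, rs, state) with the attributes spelled out above. The sketch below rebuilds a descriptor of that shape with the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API, assuming those builder methods behave as in recent HBase client releases; it only illustrates the logged attributes and is not how MasterRegion constructs the region internally.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info' family mirrors the logged attributes: 3 versions, in-memory,
        // 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        // 'proc', 'rs' and 'state' keep client defaults, close to the logged
        // single-version, 64 KB, ROW-bloom settings.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}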
2024-12-09T14:26:35,018 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754395017Disabling compacts and flushes for region at 1733754395017Disabling writes for close at 1733754395017Writing region close event to WAL at 1733754395017Closed at 1733754395017 2024-12-09T14:26:35,019 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/.initializing 2024-12-09T14:26:35,019 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/WALs/f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:35,023 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C35663%2C1733754394839, suffix=, logDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/WALs/f4e784dc7cb5,35663,1733754394839, archiveDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/oldWALs, maxLogs=10 2024-12-09T14:26:35,024 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C35663%2C1733754394839.1733754395023 2024-12-09T14:26:35,029 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/WALs/f4e784dc7cb5,35663,1733754394839/f4e784dc7cb5%2C35663%2C1733754394839.1733754395023 2024-12-09T14:26:35,030 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43969:43969),(127.0.0.1/127.0.0.1:35751:35751)] 2024-12-09T14:26:35,031 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:26:35,031 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:35,031 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,031 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T14:26:35,034 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T14:26:35,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:26:35,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T14:26:35,040 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:26:35,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T14:26:35,042 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:26:35,043 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,044 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,044 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,046 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,046 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,046 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T14:26:35,048 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:35,050 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:26:35,051 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762239, jitterRate=-0.030763596296310425}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T14:26:35,052 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733754395031Initializing all the Stores at 1733754395032 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754395032Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754395032Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754395032Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754395033 (+1 ms)Cleaning up temporary data from old regions at 1733754395046 (+13 ms)Region opened successfully at 1733754395052 (+6 ms) 2024-12-09T14:26:35,052 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T14:26:35,057 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ce7370e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:26:35,058 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T14:26:35,058 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T14:26:35,058 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T14:26:35,058 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T14:26:35,059 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T14:26:35,059 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T14:26:35,059 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T14:26:35,062 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T14:26:35,063 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T14:26:35,064 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T14:26:35,065 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T14:26:35,066 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T14:26:35,068 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T14:26:35,068 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T14:26:35,069 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T14:26:35,071 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T14:26:35,072 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T14:26:35,073 DEBUG 
[master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T14:26:35,076 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T14:26:35,077 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T14:26:35,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:35,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:35,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,081 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f4e784dc7cb5,35663,1733754394839, sessionid=0x1012b9428e50000, setting cluster-up flag (Was=false) 2024-12-09T14:26:35,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,091 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T14:26:35,093 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:35,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,102 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T14:26:35,103 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:35,105 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T14:26:35,110 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:35,110 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T14:26:35,111 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T14:26:35,111 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f4e784dc7cb5,35663,1733754394839 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T14:26:35,112 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:35,113 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:35,113 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:35,113 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:35,113 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f4e784dc7cb5:0, corePoolSize=10, maxPoolSize=10 2024-12-09T14:26:35,113 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,113 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:26:35,113 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T14:26:35,114 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733754425114 2024-12-09T14:26:35,114 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T14:26:35,114 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T14:26:35,114 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T14:26:35,114 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T14:26:35,114 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T14:26:35,115 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T14:26:35,115 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,115 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(746): ClusterId : b46282b3-80c6-49d5-bc31-e622524eee19 2024-12-09T14:26:35,115 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:26:35,115 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T14:26:35,115 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:35,115 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T14:26:35,115 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T14:26:35,115 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T14:26:35,116 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T14:26:35,116 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T14:26:35,116 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754395116,5,FailOnTimeoutGroup] 2024-12-09T14:26:35,116 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754395116,5,FailOnTimeoutGroup] 2024-12-09T14:26:35,116 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,116 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T14:26:35,116 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,117 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,117 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,117 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T14:26:35,117 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:26:35,117 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:26:35,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:26:35,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:26:35,125 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T14:26:35,126 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 
'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b 2024-12-09T14:26:35,126 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:26:35,127 DEBUG [RS:0;f4e784dc7cb5:40785 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30539560, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:26:35,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:26:35,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:26:35,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:35,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:26:35,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:26:35,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:26:35,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:26:35,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:26:35,142 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f4e784dc7cb5:40785 2024-12-09T14:26:35,142 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:26:35,142 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:26:35,142 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(832): About to register with Master. 
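The hbase:meta descriptor logged just above declares its column families with VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and an 8 KB block size. As a hedged sketch only (the table name "demo" and family choice are placeholders, not part of this test), an equivalent descriptor for a user table can be built with the public HBase client builders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static void main(String[] args) {
        // Column family mirroring the attributes the master logs for hbase:meta's 'info' family.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
                .build();

        // Table descriptor holding the family; 'demo' is a hypothetical table name.
        TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();

        System.out.println(table);
    }
}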
2024-12-09T14:26:35,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:26:35,143 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,35663,1733754394839 with port=40785, startcode=1733754394894 2024-12-09T14:26:35,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,144 DEBUG [RS:0;f4e784dc7cb5:40785 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:26:35,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:26:35,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:26:35,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:26:35,147 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47687, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:26:35,147 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740 2024-12-09T14:26:35,148 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35663 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,148 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35663 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740 2024-12-09T14:26:35,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:26:35,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:26:35,150 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b 2024-12-09T14:26:35,150 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37339 2024-12-09T14:26:35,150 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:26:35,151 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:26:35,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:26:35,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:26:35,153 DEBUG [RS:0;f4e784dc7cb5:40785 {}] zookeeper.ZKUtil(111): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,153 WARN [RS:0;f4e784dc7cb5:40785 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
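The "Config from master" entries above show the three settings handed back to the region server at registration: hbase.rootdir, fs.defaultFS, and hbase.master.info.port=-1. A minimal sketch of setting those same keys on a client-side configuration, using the values from this test run (the port 37339 is the mini-cluster's ephemeral NameNode port):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ConfigFromMasterSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Keys the master reports to the region server at reportForDuty.
        conf.set("fs.defaultFS", "hdfs://localhost:37339");
        conf.set("hbase.rootdir",
                "hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b");
        conf.setInt("hbase.master.info.port", -1);  // -1 disables the master info web UI

        System.out.println("rootdir = " + conf.get("hbase.rootdir"));
    }
}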
2024-12-09T14:26:35,153 INFO [RS:0;f4e784dc7cb5:40785 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:26:35,153 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/WALs/f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,153 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,40785,1733754394894] 2024-12-09T14:26:35,158 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:26:35,158 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:26:35,159 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799666, jitterRate=0.016828715801239014}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:26:35,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733754395134Initializing all the Stores at 1733754395135 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754395135Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754395136 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754395136Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754395136Cleaning up temporary data from old regions at 1733754395150 (+14 ms)Region opened successfully at 1733754395161 (+11 ms) 2024-12-09T14:26:35,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:26:35,161 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:26:35,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:26:35,162 
DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:26:35,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:26:35,163 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:26:35,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754395161Disabling compacts and flushes for region at 1733754395161Disabling writes for close at 1733754395162 (+1 ms)Writing region close event to WAL at 1733754395162Closed at 1733754395163 (+1 ms) 2024-12-09T14:26:35,163 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:26:35,163 INFO [RS:0;f4e784dc7cb5:40785 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:26:35,163 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,164 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:26:35,165 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:26:35,165 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,165 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,165 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,165 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 
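The executor.ExecutorService entries above start one named pool per event type with fixed core/max sizes, for example RS_LOG_REPLAY_OPS with corePoolSize=2, maxPoolSize=2. The HBase ExecutorService wrapper is internal, so the following is only a plain java.util.concurrent analogue of such a fixed-size, named pool:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class FixedPoolSketch {
    public static void main(String[] args) {
        AtomicInteger seq = new AtomicInteger();
        ThreadFactory named = r -> new Thread(r, "RS_LOG_REPLAY_OPS-" + seq.incrementAndGet());

        // corePoolSize=2, maxPoolSize=2, matching the RS_LOG_REPLAY_OPS executor in the log.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 2, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), named);

        pool.submit(() -> System.out.println(Thread.currentThread().getName() + " running"));
        pool.shutdown();
    }
}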
2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,165 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:35,166 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:26:35,166 DEBUG [RS:0;f4e784dc7cb5:40785 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:26:35,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T14:26:35,169 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:26:35,171 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T14:26:35,175 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,176 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,176 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,176 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,176 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,176 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,40785,1733754394894-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
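The ScheduledChore entries above (CompactionChecker every 1000 ms, MemstoreFlusherChore every 1000 ms, nonceCleaner every 360000 ms, and so on) are periodic tasks run by HBase's ChoreService. A plain-JDK sketch of the same scheduling pattern; the chore name and period are taken from the log, the task body is obviously a stand-in:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);

        // Analogue of ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS.
        chores.scheduleAtFixedRate(
                () -> System.out.println("compaction check tick"),
                0, 1000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(3);  // let a few ticks run, then stop
        chores.shutdown();
    }
}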
2024-12-09T14:26:35,197 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:26:35,197 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,40785,1733754394894-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,197 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,197 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.Replication(171): f4e784dc7cb5,40785,1733754394894 started 2024-12-09T14:26:35,217 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,217 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,40785,1733754394894, RpcServer on f4e784dc7cb5/172.17.0.3:40785, sessionid=0x1012b9428e50001 2024-12-09T14:26:35,217 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:26:35,217 DEBUG [RS:0;f4e784dc7cb5:40785 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,217 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,40785,1733754394894' 2024-12-09T14:26:35,217 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:26:35,218 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:26:35,219 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:26:35,219 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:26:35,219 DEBUG [RS:0;f4e784dc7cb5:40785 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,219 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,40785,1733754394894' 2024-12-09T14:26:35,219 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:26:35,220 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:26:35,220 DEBUG [RS:0;f4e784dc7cb5:40785 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:26:35,221 INFO [RS:0;f4e784dc7cb5:40785 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:26:35,221 INFO [RS:0;f4e784dc7cb5:40785 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T14:26:35,321 WARN [f4e784dc7cb5:35663 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
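The procedure member above scans '/hbase/flush-table-proc/abort' and '/hbase/flush-table-proc/acquired' and relies on the NodeChildrenChanged events seen throughout this log to notice new procedures. A hedged sketch with the stock org.apache.zookeeper client showing a children watch on such a path; the connect string 127.0.0.1:2181 is a placeholder, since the test's quorum runs on a random port (63191 here):

import java.util.List;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ProcedureZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
        Watcher watcher = event -> {
            // Fires with type=NodeChildrenChanged when a new procedure znode appears.
            System.out.println("event " + event.getType() + " on " + event.getPath());
        };

        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);

        // One-shot children watch on the 'acquired' barrier node, as the member RPCs do.
        List<String> procedures = zk.getChildren("/hbase/flush-table-proc/acquired", watcher);
        System.out.println("pending procedures: " + procedures);

        zk.close();
    }
}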
2024-12-09T14:26:35,323 INFO [RS:0;f4e784dc7cb5:40785 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C40785%2C1733754394894, suffix=, logDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/WALs/f4e784dc7cb5,40785,1733754394894, archiveDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/oldWALs, maxLogs=32 2024-12-09T14:26:35,325 INFO [RS:0;f4e784dc7cb5:40785 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40785%2C1733754394894.1733754395324 2024-12-09T14:26:35,332 INFO [RS:0;f4e784dc7cb5:40785 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/WALs/f4e784dc7cb5,40785,1733754394894/f4e784dc7cb5%2C40785%2C1733754394894.1733754395324 2024-12-09T14:26:35,333 DEBUG [RS:0;f4e784dc7cb5:40785 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43969:43969),(127.0.0.1/127.0.0.1:35751:35751)] 2024-12-09T14:26:35,571 DEBUG [f4e784dc7cb5:35663 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T14:26:35,572 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,574 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,40785,1733754394894, state=OPENING 2024-12-09T14:26:35,575 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T14:26:35,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,578 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:35,578 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:26:35,578 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:35,578 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,40785,1733754394894}] 2024-12-09T14:26:35,732 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T14:26:35,734 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57543, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T14:26:35,738 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T14:26:35,739 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:26:35,741 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C40785%2C1733754394894.meta, suffix=.meta, logDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/WALs/f4e784dc7cb5,40785,1733754394894, archiveDir=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/oldWALs, maxLogs=32 2024-12-09T14:26:35,743 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40785%2C1733754394894.meta.1733754395742.meta 2024-12-09T14:26:35,749 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/WALs/f4e784dc7cb5,40785,1733754394894/f4e784dc7cb5%2C40785%2C1733754394894.meta.1733754395742.meta 2024-12-09T14:26:35,753 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43969:43969),(127.0.0.1/127.0.0.1:35751:35751)] 2024-12-09T14:26:35,754 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:26:35,754 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T14:26:35,754 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T14:26:35,754 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
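The meta open above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor (the coprocessor$1 attribute with priority 536870911). For a user table, attaching a coprocessor by class name goes through the same builder API; a rough sketch with a hypothetical 'demo' table (newer releases also accept a CoprocessorDescriptor instead of the plain class name):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
    public static void main(String[] args) throws IOException {
        // Attach the same endpoint hbase:meta carries, here on a placeholder 'demo' table.
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();

        System.out.println(td.getCoprocessorDescriptors());
    }
}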
2024-12-09T14:26:35,754 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T14:26:35,755 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:35,755 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T14:26:35,755 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T14:26:35,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:26:35,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:26:35,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:26:35,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:26:35,760 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:26:35,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:26:35,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:35,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:26:35,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:26:35,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:35,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
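The CompactionConfiguration lines above report the effective policy per family: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, and a major compaction period of 604800000 ms with 0.5 jitter. These map onto ordinary hbase-site.xml settings; the key names below are written from memory and should be checked against your HBase version's documentation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values mirroring what CompactionConfiguration(183) reports in the log.
        conf.setInt("hbase.hstore.compaction.min", 3);               // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);              // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);        // selection ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter

        System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
    }
}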
2024-12-09T14:26:35,764 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:26:35,765 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740 2024-12-09T14:26:35,767 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740 2024-12-09T14:26:35,768 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:26:35,768 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:26:35,769 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:26:35,770 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:26:35,771 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824724, jitterRate=0.048691391944885254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:26:35,772 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T14:26:35,773 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733754395755Writing region info on filesystem at 1733754395755Initializing all the Stores at 1733754395756 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754395756Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754395757 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754395757Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754395757Cleaning up temporary data from old regions at 1733754395768 (+11 ms)Running coprocessor post-open hooks at 1733754395772 (+4 ms)Region opened successfully at 1733754395773 (+1 ms) 2024-12-09T14:26:35,775 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733754395731 2024-12-09T14:26:35,778 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T14:26:35,778 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T14:26:35,779 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,780 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,40785,1733754394894, state=OPEN 2024-12-09T14:26:35,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:26:35,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:26:35,789 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:35,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:35,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T14:26:35,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,40785,1733754394894 in 211 msec 2024-12-09T14:26:35,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T14:26:35,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 626 msec 2024-12-09T14:26:35,797 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:35,797 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T14:26:35,799 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:26:35,799 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,40785,1733754394894, seqNum=-1] 2024-12-09T14:26:35,799 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:26:35,801 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50629, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:26:35,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 697 msec 2024-12-09T14:26:35,808 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733754395808, completionTime=-1 2024-12-09T14:26:35,808 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T14:26:35,808 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733754455810 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733754515810 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,35663,1733754394839-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,35663,1733754394839-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,35663,1733754394839-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,810 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f4e784dc7cb5:35663, period=300000, unit=MILLISECONDS is enabled. 
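InitMetaProcedure above bootstraps the 'default' and 'hbase' namespaces. Creating an additional namespace from a client goes through the Admin API; a minimal sketch, assuming a reachable cluster configuration and using a made-up 'sandbox' namespace:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // 'sandbox' is a placeholder; 'default' and 'hbase' already exist after bootstrap.
            admin.createNamespace(NamespaceDescriptor.create("sandbox").build());
            System.out.println("namespaces: " + admin.listNamespaceDescriptors().length);
        }
    }
}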
2024-12-09T14:26:35,811 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,811 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:35,812 DEBUG [master/f4e784dc7cb5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T14:26:35,815 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.879sec 2024-12-09T14:26:35,815 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T14:26:35,815 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T14:26:35,816 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T14:26:35,816 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T14:26:35,816 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T14:26:35,816 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,35663,1733754394839-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:26:35,816 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,35663,1733754394839-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T14:26:35,819 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T14:26:35,819 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T14:26:35,819 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,35663,1733754394839-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:26:35,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@668848ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:26:35,916 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f4e784dc7cb5,35663,-1 for getting cluster id 2024-12-09T14:26:35,916 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T14:26:35,918 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b46282b3-80c6-49d5-bc31-e622524eee19' 2024-12-09T14:26:35,918 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T14:26:35,919 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b46282b3-80c6-49d5-bc31-e622524eee19" 2024-12-09T14:26:35,919 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ef19b7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:26:35,919 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f4e784dc7cb5,35663,-1] 2024-12-09T14:26:35,919 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T14:26:35,920 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:35,922 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56432, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T14:26:35,923 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e5835fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:26:35,924 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:26:35,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,40785,1733754394894, seqNum=-1] 2024-12-09T14:26:35,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:26:35,927 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51870, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:26:35,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:35,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:35,932 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T14:26:35,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T14:26:35,932 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T14:26:35,933 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:26:35,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:35,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:35,933 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T14:26:35,933 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T14:26:35,933 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1512884799, stopped=false 2024-12-09T14:26:35,933 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f4e784dc7cb5,35663,1733754394839 2024-12-09T14:26:35,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:35,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:35,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:35,935 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:26:35,935 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T14:26:35,936 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:26:35,936 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:35,936 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:35,936 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:35,936 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,40785,1733754394894' ***** 2024-12-09T14:26:35,936 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:26:35,936 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:26:35,937 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f4e784dc7cb5:40785. 2024-12-09T14:26:35,937 DEBUG [RS:0;f4e784dc7cb5:40785 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:26:35,937 DEBUG [RS:0;f4e784dc7cb5:40785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:26:35,937 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T14:26:35,938 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T14:26:35,938 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T14:26:35,938 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T14:26:35,938 DEBUG [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T14:26:35,938 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:26:35,938 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:26:35,938 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:26:35,938 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:26:35,938 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:26:35,939 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T14:26:35,958 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740/.tmp/ns/e40de14480f3495782fd5d7fdd0f7472 is 43, key is default/ns:d/1733754395802/Put/seqid=0 2024-12-09T14:26:35,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741835_1011 (size=5153) 2024-12-09T14:26:35,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741835_1011 (size=5153) 2024-12-09T14:26:35,965 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740/.tmp/ns/e40de14480f3495782fd5d7fdd0f7472 2024-12-09T14:26:35,972 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740/.tmp/ns/e40de14480f3495782fd5d7fdd0f7472 as hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740/ns/e40de14480f3495782fd5d7fdd0f7472 2024-12-09T14:26:35,979 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740/ns/e40de14480f3495782fd5d7fdd0f7472, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T14:26:35,980 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-12-09T14:26:35,980 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T14:26:35,986 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T14:26:35,986 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:26:35,987 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:26:35,987 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754395938Running coprocessor pre-close hooks at 1733754395938Disabling compacts and flushes for region at 1733754395938Disabling writes for close at 1733754395938Obtaining lock to block concurrent updates at 1733754395939 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733754395939Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733754395939Flushing stores of hbase:meta,,1.1588230740 at 1733754395940 (+1 ms)Flushing 1588230740/ns: creating writer at 1733754395940Flushing 1588230740/ns: appending metadata at 1733754395957 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733754395957Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e1d4ee9: reopening flushed file at 1733754395971 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1733754395980 (+9 ms)Writing region close event to WAL at 1733754395982 (+2 ms)Running coprocessor post-close hooks at 1733754395986 (+4 ms)Closed at 1733754395987 (+1 ms) 2024-12-09T14:26:35,987 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T14:26:36,138 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,40785,1733754394894; all regions closed. 
2024-12-09T14:26:36,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,139 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,139 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,139 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741834_1010 (size=1152) 2024-12-09T14:26:36,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741834_1010 (size=1152) 2024-12-09T14:26:36,145 DEBUG [RS:0;f4e784dc7cb5:40785 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/oldWALs 2024-12-09T14:26:36,145 INFO [RS:0;f4e784dc7cb5:40785 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C40785%2C1733754394894.meta:.meta(num 1733754395742) 2024-12-09T14:26:36,145 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,146 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,146 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,146 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,146 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741833_1009 (size=93) 2024-12-09T14:26:36,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741833_1009 (size=93) 2024-12-09T14:26:36,151 DEBUG [RS:0;f4e784dc7cb5:40785 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/oldWALs 2024-12-09T14:26:36,151 INFO [RS:0;f4e784dc7cb5:40785 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C40785%2C1733754394894:(num 1733754395324) 2024-12-09T14:26:36,151 DEBUG [RS:0;f4e784dc7cb5:40785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:36,151 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:26:36,151 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:26:36,151 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T14:26:36,152 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:26:36,152 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T14:26:36,152 INFO [RS:0;f4e784dc7cb5:40785 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40785 2024-12-09T14:26:36,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,40785,1733754394894 2024-12-09T14:26:36,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:26:36,154 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:26:36,155 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,40785,1733754394894] 2024-12-09T14:26:36,157 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,40785,1733754394894 already deleted, retry=false 2024-12-09T14:26:36,157 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,40785,1733754394894 expired; onlineServers=0 2024-12-09T14:26:36,157 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f4e784dc7cb5,35663,1733754394839' ***** 2024-12-09T14:26:36,157 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T14:26:36,158 INFO [M:0;f4e784dc7cb5:35663 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:26:36,158 INFO [M:0;f4e784dc7cb5:35663 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:26:36,158 DEBUG [M:0;f4e784dc7cb5:35663 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T14:26:36,158 DEBUG [M:0;f4e784dc7cb5:35663 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T14:26:36,158 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T14:26:36,158 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754395116 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754395116,5,FailOnTimeoutGroup] 2024-12-09T14:26:36,158 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754395116 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754395116,5,FailOnTimeoutGroup] 2024-12-09T14:26:36,158 INFO [M:0;f4e784dc7cb5:35663 {}] hbase.ChoreService(370): Chore service for: master/f4e784dc7cb5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T14:26:36,158 INFO [M:0;f4e784dc7cb5:35663 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:26:36,158 DEBUG [M:0;f4e784dc7cb5:35663 {}] master.HMaster(1795): Stopping service threads 2024-12-09T14:26:36,159 INFO [M:0;f4e784dc7cb5:35663 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T14:26:36,159 INFO [M:0;f4e784dc7cb5:35663 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:26:36,159 INFO [M:0;f4e784dc7cb5:35663 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T14:26:36,159 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T14:26:36,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T14:26:36,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:36,160 DEBUG [M:0;f4e784dc7cb5:35663 {}] zookeeper.ZKUtil(347): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T14:26:36,160 WARN [M:0;f4e784dc7cb5:35663 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T14:26:36,161 INFO [M:0;f4e784dc7cb5:35663 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/.lastflushedseqids 2024-12-09T14:26:36,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741836_1012 (size=99) 2024-12-09T14:26:36,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741836_1012 (size=99) 2024-12-09T14:26:36,167 INFO [M:0;f4e784dc7cb5:35663 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T14:26:36,167 INFO [M:0;f4e784dc7cb5:35663 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T14:26:36,168 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:26:36,168 INFO [M:0;f4e784dc7cb5:35663 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:36,168 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:36,168 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:26:36,168 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:36,168 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T14:26:36,186 DEBUG [M:0;f4e784dc7cb5:35663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5796cb178a8b4cfb9949e20eb686ab0c is 82, key is hbase:meta,,1/info:regioninfo/1733754395779/Put/seqid=0 2024-12-09T14:26:36,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741837_1013 (size=5672) 2024-12-09T14:26:36,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741837_1013 (size=5672) 2024-12-09T14:26:36,193 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5796cb178a8b4cfb9949e20eb686ab0c 2024-12-09T14:26:36,216 DEBUG [M:0;f4e784dc7cb5:35663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7131caf0c1484890b5feacf3d488636b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733754395807/Put/seqid=0 2024-12-09T14:26:36,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741838_1014 (size=5275) 2024-12-09T14:26:36,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741838_1014 (size=5275) 2024-12-09T14:26:36,222 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7131caf0c1484890b5feacf3d488636b 2024-12-09T14:26:36,246 DEBUG [M:0;f4e784dc7cb5:35663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3e470d4fa9b4cab8cfd99f62af3accd is 69, key is f4e784dc7cb5,40785,1733754394894/rs:state/1733754395148/Put/seqid=0 2024-12-09T14:26:36,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741839_1015 (size=5156) 2024-12-09T14:26:36,252 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741839_1015 (size=5156) 2024-12-09T14:26:36,252 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3e470d4fa9b4cab8cfd99f62af3accd 2024-12-09T14:26:36,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:36,256 INFO [RS:0;f4e784dc7cb5:40785 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:26:36,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40785-0x1012b9428e50001, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:36,256 INFO [RS:0;f4e784dc7cb5:40785 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,40785,1733754394894; zookeeper connection closed. 2024-12-09T14:26:36,256 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@78a83004 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@78a83004 2024-12-09T14:26:36,256 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T14:26:36,275 DEBUG [M:0;f4e784dc7cb5:35663 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bb0e7a7ba2b34473a974f7f5e464adfe is 52, key is load_balancer_on/state:d/1733754395931/Put/seqid=0 2024-12-09T14:26:36,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741840_1016 (size=5056) 2024-12-09T14:26:36,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741840_1016 (size=5056) 2024-12-09T14:26:36,281 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bb0e7a7ba2b34473a974f7f5e464adfe 2024-12-09T14:26:36,288 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5796cb178a8b4cfb9949e20eb686ab0c as hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5796cb178a8b4cfb9949e20eb686ab0c 2024-12-09T14:26:36,294 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5796cb178a8b4cfb9949e20eb686ab0c, entries=8, sequenceid=29, filesize=5.5 K 2024-12-09T14:26:36,296 DEBUG [M:0;f4e784dc7cb5:35663 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7131caf0c1484890b5feacf3d488636b as hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7131caf0c1484890b5feacf3d488636b 2024-12-09T14:26:36,302 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7131caf0c1484890b5feacf3d488636b, entries=3, sequenceid=29, filesize=5.2 K 2024-12-09T14:26:36,303 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b3e470d4fa9b4cab8cfd99f62af3accd as hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3e470d4fa9b4cab8cfd99f62af3accd 2024-12-09T14:26:36,309 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b3e470d4fa9b4cab8cfd99f62af3accd, entries=1, sequenceid=29, filesize=5.0 K 2024-12-09T14:26:36,310 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bb0e7a7ba2b34473a974f7f5e464adfe as hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bb0e7a7ba2b34473a974f7f5e464adfe 2024-12-09T14:26:36,316 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37339/user/jenkins/test-data/9086223d-5625-42cc-b3ff-c9f188cb206b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bb0e7a7ba2b34473a974f7f5e464adfe, entries=1, sequenceid=29, filesize=4.9 K 2024-12-09T14:26:36,317 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false 2024-12-09T14:26:36,319 INFO [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:36,319 DEBUG [M:0;f4e784dc7cb5:35663 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754396168Disabling compacts and flushes for region at 1733754396168Disabling writes for close at 1733754396168Obtaining lock to block concurrent updates at 1733754396168Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733754396168Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733754396169 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733754396169Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733754396170 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733754396186 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733754396186Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733754396198 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733754396215 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733754396215Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733754396228 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733754396245 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733754396245Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733754396259 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733754396275 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733754396275Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a3d6ca1: reopening flushed file at 1733754396287 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51891f2f: reopening flushed file at 1733754396295 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12d2e771: reopening flushed file at 1733754396302 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4dae55fd: reopening flushed file at 1733754396309 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false at 1733754396317 (+8 ms)Writing region close event to WAL at 1733754396319 (+2 ms)Closed at 1733754396319 2024-12-09T14:26:36,321 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,321 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44267 is added to blk_1073741830_1006 (size=10311) 2024-12-09T14:26:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741830_1006 (size=10311) 2024-12-09T14:26:36,324 INFO [M:0;f4e784dc7cb5:35663 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T14:26:36,325 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T14:26:36,325 INFO [M:0;f4e784dc7cb5:35663 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35663 2024-12-09T14:26:36,325 INFO [M:0;f4e784dc7cb5:35663 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:26:36,427 INFO [M:0;f4e784dc7cb5:35663 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:26:36,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:36,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35663-0x1012b9428e50000, quorum=127.0.0.1:63191, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:26:36,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b95e72c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:36,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@376d199b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:36,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:36,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@194f043a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:36,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f11c100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:36,432 WARN [BP-946150497-172.17.0.3-1733754394114 heartbeating to localhost/127.0.0.1:37339 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:36,432 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:26:36,432 WARN [BP-946150497-172.17.0.3-1733754394114 heartbeating to localhost/127.0.0.1:37339 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-946150497-172.17.0.3-1733754394114 (Datanode Uuid acace847-30e6-47cb-89b5-31e661391b6e) service to localhost/127.0.0.1:37339 2024-12-09T14:26:36,432 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:36,433 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data3/current/BP-946150497-172.17.0.3-1733754394114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:36,433 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data4/current/BP-946150497-172.17.0.3-1733754394114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:36,433 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:36,436 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@641eaf99{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:36,437 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@276f8783{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:36,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:36,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a0f3c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:36,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48bb784e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:36,438 WARN [BP-946150497-172.17.0.3-1733754394114 heartbeating to localhost/127.0.0.1:37339 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:36,438 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:26:36,438 WARN [BP-946150497-172.17.0.3-1733754394114 heartbeating to localhost/127.0.0.1:37339 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-946150497-172.17.0.3-1733754394114 (Datanode Uuid 1a7ccbdd-601b-4baf-873b-2cd079b3d084) service to localhost/127.0.0.1:37339 2024-12-09T14:26:36,438 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:36,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data1/current/BP-946150497-172.17.0.3-1733754394114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:36,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/cluster_6af415c7-c106-31d8-e375-f31c185ec782/data/data2/current/BP-946150497-172.17.0.3-1733754394114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:36,439 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:36,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26a9efc0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:26:36,446 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45e3157d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:36,446 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:36,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46d26a79{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:36,446 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e979f8d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:36,453 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T14:26:36,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T14:26:36,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T14:26:36,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.log.dir so I do NOT create it in target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba 2024-12-09T14:26:36,474 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d9a1e24-3f58-8421-974f-3a13348c7156/hadoop.tmp.dir so I do NOT create it in target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989, deleteOnExit=true 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/test.cache.data in system properties and HBase conf 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir in system properties and HBase conf 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T14:26:36,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T14:26:36,475 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/nfs.dump.dir in system properties and HBase conf 2024-12-09T14:26:36,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir in system properties and HBase conf 2024-12-09T14:26:36,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:26:36,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T14:26:36,477 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T14:26:36,493 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:26:36,572 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:36,577 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:36,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:36,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:36,581 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:26:36,582 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:36,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e7d5546{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:36,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@142d24a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:36,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77cd23f0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir/jetty-localhost-39165-hadoop-hdfs-3_4_1-tests_jar-_-any-14602925894160736705/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:26:36,702 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3ed3a961{HTTP/1.1, (http/1.1)}{localhost:39165} 2024-12-09T14:26:36,702 INFO [Time-limited test {}] server.Server(415): Started @104560ms 2024-12-09T14:26:36,717 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:26:36,803 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:36,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:36,807 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:36,807 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:36,807 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:26:36,808 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7183cb8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:36,808 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65dec1b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:36,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67cf8368{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir/jetty-localhost-33117-hadoop-hdfs-3_4_1-tests_jar-_-any-10538569309375154560/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:36,929 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30e7c448{HTTP/1.1, (http/1.1)}{localhost:33117} 2024-12-09T14:26:36,929 INFO [Time-limited test {}] server.Server(415): Started @104787ms 2024-12-09T14:26:36,931 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:26:36,969 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:36,972 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:36,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:36,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:36,974 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:26:36,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7982676d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:36,975 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66182b08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:37,046 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data2/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:37,046 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data1/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:37,066 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:26:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab44daa42f82b81f with lease ID 0xcc254b5549ab9872: Processing first storage report for DS-7e267eb6-25af-47df-82ae-b4bebe68e807 from datanode DatanodeRegistration(127.0.0.1:38909, datanodeUuid=4a0377c6-52d3-4ff5-9888-ab20d26e2836, infoPort=38455, infoSecurePort=0, ipcPort=39227, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab44daa42f82b81f with lease ID 0xcc254b5549ab9872: from storage DS-7e267eb6-25af-47df-82ae-b4bebe68e807 node DatanodeRegistration(127.0.0.1:38909, datanodeUuid=4a0377c6-52d3-4ff5-9888-ab20d26e2836, infoPort=38455, infoSecurePort=0, ipcPort=39227, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab44daa42f82b81f with lease ID 0xcc254b5549ab9872: Processing first storage report for DS-10bcadef-aa71-4b50-b052-7dfd0df0e6cd from datanode DatanodeRegistration(127.0.0.1:38909, datanodeUuid=4a0377c6-52d3-4ff5-9888-ab20d26e2836, infoPort=38455, infoSecurePort=0, ipcPort=39227, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab44daa42f82b81f with lease ID 0xcc254b5549ab9872: from storage DS-10bcadef-aa71-4b50-b052-7dfd0df0e6cd node DatanodeRegistration(127.0.0.1:38909, datanodeUuid=4a0377c6-52d3-4ff5-9888-ab20d26e2836, infoPort=38455, infoSecurePort=0, ipcPort=39227, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:37,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@229a8eec{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir/jetty-localhost-39065-hadoop-hdfs-3_4_1-tests_jar-_-any-7134962343035491298/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:37,104 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7720beab{HTTP/1.1, (http/1.1)}{localhost:39065} 2024-12-09T14:26:37,104 INFO [Time-limited test {}] server.Server(415): Started @104962ms 2024-12-09T14:26:37,106 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
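The entries above show the test bringing up a two-datanode embedded HDFS cluster: block pool slices are initialized on data1/data2, the datanodes send their first block reports to the BlockManager, and Jetty endpoints are started for the namenode and datanode web UIs. The test reaches this through HBaseTestingUtil's wrappers; as an illustration only, the following minimal sketch stands up a comparable embedded cluster directly with Hadoop's MiniDFSCluster builder. The scratch directory and paths in it are assumptions, not values from this run.

```java
// Minimal sketch: a two-datanode embedded HDFS cluster, comparable to what the
// log above reports after "STARTING DFS". Paths here are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Keep all cluster state under one scratch directory, similar in spirit to
    // the target/test-data layout in the log (this value is an assumption).
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-scratch");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // matches numDataNodes=2 in the StartMiniClusterOption above
        .build();
    cluster.waitActive();  // wait until both datanodes have registered and reported blocks

    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/user/jenkins/test-data"));
    System.out.println("NameNode at " + fs.getUri());

    cluster.shutdown();    // tears down the datanodes and the namenode
  }
}
```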
2024-12-09T14:26:37,179 INFO [regionserver/f4e784dc7cb5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:26:37,215 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data3/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:37,215 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data4/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:37,233 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:26:37,235 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x825fe64f376e8781 with lease ID 0xcc254b5549ab9873: Processing first storage report for DS-948aca14-e576-4534-986d-672d3db444f3 from datanode DatanodeRegistration(127.0.0.1:39239, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46245, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:37,236 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x825fe64f376e8781 with lease ID 0xcc254b5549ab9873: from storage DS-948aca14-e576-4534-986d-672d3db444f3 node DatanodeRegistration(127.0.0.1:39239, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46245, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T14:26:37,236 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x825fe64f376e8781 with lease ID 0xcc254b5549ab9873: Processing first storage report for DS-263eb177-1825-412d-b92f-942971985085 from datanode DatanodeRegistration(127.0.0.1:39239, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46245, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:37,236 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x825fe64f376e8781 with lease ID 0xcc254b5549ab9873: from storage DS-263eb177-1825-412d-b92f-942971985085 node DatanodeRegistration(127.0.0.1:39239, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46245, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:37,339 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba 2024-12-09T14:26:37,343 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/zookeeper_0, clientPort=54193, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T14:26:37,344 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54193 2024-12-09T14:26:37,345 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:37,347 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:37,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:26:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:26:37,360 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd with version=8 2024-12-09T14:26:37,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase-staging 2024-12-09T14:26:37,363 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:26:37,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:37,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:37,363 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:26:37,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:37,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:26:37,363 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T14:26:37,363 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:26:37,364 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34437 2024-12-09T14:26:37,366 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34437 connecting to ZooKeeper ensemble=127.0.0.1:54193 2024-12-09T14:26:37,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:344370x0, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:26:37,374 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34437-0x1012b9432be0000 connected 2024-12-09T14:26:37,394 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:37,396 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:37,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:37,399 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd, hbase.cluster.distributed=false 2024-12-09T14:26:37,400 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:26:37,402 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34437 2024-12-09T14:26:37,402 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34437 2024-12-09T14:26:37,402 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34437 2024-12-09T14:26:37,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34437 2024-12-09T14:26:37,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34437 2024-12-09T14:26:37,420 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:26:37,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:37,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:37,420 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:26:37,420 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:37,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:26:37,421 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:26:37,421 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:26:37,422 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41507 2024-12-09T14:26:37,423 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41507 connecting to ZooKeeper ensemble=127.0.0.1:54193 2024-12-09T14:26:37,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:37,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:37,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415070x0, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:26:37,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:415070x0, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:26:37,431 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41507-0x1012b9432be0001 connected 2024-12-09T14:26:37,432 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T14:26:37,434 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T14:26:37,435 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T14:26:37,437 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:26:37,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41507 2024-12-09T14:26:37,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41507 2024-12-09T14:26:37,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41507 2024-12-09T14:26:37,438 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, 
numCallQueues=1, port=41507 2024-12-09T14:26:37,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41507 2024-12-09T14:26:37,454 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f4e784dc7cb5:34437 2024-12-09T14:26:37,454 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:37,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:37,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:37,458 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:37,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T14:26:37,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,460 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T14:26:37,462 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f4e784dc7cb5,34437,1733754397363 from backup master directory 2024-12-09T14:26:37,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:37,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:37,463 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
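The ZKUtil/ZKWatcher lines above repeatedly report "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master, followed by NodeCreated/NodeChildrenChanged events once the master publishes them. As a sketch of that pattern only (HBase itself goes through its ZKWatcher/ZKUtil layer, not this code), the snippet below uses the plain Apache ZooKeeper client to watch a znode that may not exist yet. The connect string matches the ensemble printed in the log; everything else is illustrative.

```java
// Minimal sketch of the "set watcher on a znode that does not yet exist" pattern
// seen in the ZKUtil lines above, using the plain ZooKeeper client.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);

    Watcher watcher = (WatchedEvent event) -> {
      // Fires when /hbase/running is created (connection events arrive here too).
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        created.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:54193", 30_000, watcher);

    // exists() returns null for an absent znode but still registers the default
    // watcher, so we are notified the moment the master creates the node.
    Stat stat = zk.exists("/hbase/running", true);
    if (stat == null) {
      System.out.println("/hbase/running not present yet, waiting for NodeCreated...");
      created.await();
    }
    zk.close();
  }
}
```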
2024-12-09T14:26:37,463 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:37,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:26:37,469 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/hbase.id] with ID: e3af21c7-3712-4977-bd20-0ccd0eabadcd 2024-12-09T14:26:37,469 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/.tmp/hbase.id 2024-12-09T14:26:37,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:26:37,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:26:37,479 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/.tmp/hbase.id]:[hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/hbase.id] 2024-12-09T14:26:37,496 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:37,496 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T14:26:37,498 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
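The FSUtils lines above show the newly active master publishing the cluster ID by first writing it to a temporary location (.tmp/hbase.id) and then moving it to its final name (hbase.id), so readers never observe a partially written file. The sketch below illustrates that same write-then-rename pattern with the Hadoop FileSystem API; the root directory, file content, and helper name are placeholders, not the values or code from this run.

```java
// Minimal sketch of the "write to .tmp, then rename into place" pattern used for
// hbase.id in the log above. Paths, content, and method names are placeholders.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishFileSketch {
  public static void publishClusterId(FileSystem fs, Path rootDir, String clusterId)
      throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // Write the complete content to a temporary location first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // ...then move it to the final name in one step.
    if (!fs.rename(tmp, target)) {
      throw new IOException("rename " + tmp + " -> " + target + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);  // local FS unless fs.defaultFS points at HDFS
    publishClusterId(fs, new Path("/tmp/hbase-rootdir"), "example-cluster-id");
  }
}
```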
2024-12-09T14:26:37,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:26:37,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:26:37,509 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:26:37,510 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T14:26:37,510 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:26:37,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:26:37,521 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store 2024-12-09T14:26:37,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:26:37,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:26:37,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:26:37,540 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:37,540 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:26:37,540 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:37,540 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:37,540 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:26:37,540 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:26:37,540 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
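The descriptor dumped above for the master's local 'master:store' region lists four column families (info, proc, rs, state) with per-family settings such as VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING, and BLOCKSIZE. That region is internal and not something users create, but as an illustration of how to read those attributes, the sketch below expresses equivalent settings for the 'info' and 'proc' families with the public HBase descriptor builders. The table name "demo:store_like" is hypothetical.

```java
// Minimal sketch: column-family settings like the ones printed above
// ('info': VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, BLOCKSIZE=8KB,
// DATA_BLOCK_ENCODING=ROW_INDEX_V1) written with the public builders.
// The table name is hypothetical; master:store itself is an internal region.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store_like"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```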
2024-12-09T14:26:37,540 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754397540Disabling compacts and flushes for region at 1733754397540Disabling writes for close at 1733754397540Writing region close event to WAL at 1733754397540Closed at 1733754397540 2024-12-09T14:26:37,542 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/.initializing 2024-12-09T14:26:37,542 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:37,546 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C34437%2C1733754397363, suffix=, logDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363, archiveDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/oldWALs, maxLogs=10 2024-12-09T14:26:37,547 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 2024-12-09T14:26:37,555 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 2024-12-09T14:26:37,559 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38455:38455),(127.0.0.1/127.0.0.1:46245:46245)] 2024-12-09T14:26:37,560 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:26:37,560 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:37,561 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,561 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T14:26:37,565 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:37,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T14:26:37,568 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:26:37,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T14:26:37,571 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:26:37,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,573 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T14:26:37,573 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:26:37,574 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,575 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,576 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,577 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,577 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,578 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T14:26:37,580 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:26:37,583 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:26:37,583 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710855, jitterRate=-0.09610135853290558}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T14:26:37,585 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733754397561Initializing all the Stores at 1733754397562 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754397562Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754397563 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754397563Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754397563Cleaning up temporary data from old regions at 1733754397577 (+14 ms)Region opened successfully at 1733754397585 (+8 ms) 2024-12-09T14:26:37,585 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T14:26:37,590 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15066223, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:26:37,592 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T14:26:37,592 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T14:26:37,592 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T14:26:37,592 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T14:26:37,593 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T14:26:37,594 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T14:26:37,594 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T14:26:37,597 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T14:26:37,598 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T14:26:37,600 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T14:26:37,601 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T14:26:37,601 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T14:26:37,603 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T14:26:37,603 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T14:26:37,604 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T14:26:37,606 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T14:26:37,607 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T14:26:37,608 DEBUG 
[master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T14:26:37,610 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T14:26:37,613 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T14:26:37,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:37,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:26:37,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,615 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f4e784dc7cb5,34437,1733754397363, sessionid=0x1012b9432be0000, setting cluster-up flag (Was=false) 2024-12-09T14:26:37,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,624 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T14:26:37,626 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:37,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:37,637 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T14:26:37,639 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:37,641 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T14:26:37,643 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:37,644 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T14:26:37,644 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T14:26:37,644 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f4e784dc7cb5,34437,1733754397363 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T14:26:37,646 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:37,646 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:37,647 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:37,647 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:26:37,647 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f4e784dc7cb5:0, corePoolSize=10, maxPoolSize=10 2024-12-09T14:26:37,647 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,647 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:26:37,647 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T14:26:37,657 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733754427657 2024-12-09T14:26:37,657 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:37,657 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T14:26:37,657 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T14:26:37,657 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T14:26:37,657 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T14:26:37,657 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T14:26:37,657 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T14:26:37,658 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T14:26:37,658 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T14:26:37,658 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T14:26:37,658 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T14:26:37,659 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T14:26:37,659 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,659 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T14:26:37,659 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T14:26:37,659 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T14:26:37,659 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754397659,5,FailOnTimeoutGroup] 2024-12-09T14:26:37,660 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754397659,5,FailOnTimeoutGroup] 2024-12-09T14:26:37,660 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,660 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T14:26:37,660 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,660 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:26:37,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:26:37,670 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T14:26:37,670 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd 2024-12-09T14:26:37,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:26:37,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:26:37,680 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:37,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:26:37,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:26:37,684 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:37,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:26:37,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:26:37,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:37,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:26:37,689 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:26:37,690 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:37,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:26:37,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:26:37,692 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:37,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:37,693 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:26:37,694 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740 2024-12-09T14:26:37,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740 2024-12-09T14:26:37,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:26:37,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:26:37,697 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:26:37,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:26:37,702 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:26:37,702 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751918, jitterRate=-0.043887004256248474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:26:37,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733754397680Initializing all the Stores at 1733754397681 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754397682 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754397682Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754397682Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754397682Cleaning up temporary data from old regions at 1733754397696 (+14 ms)Region opened successfully at 1733754397703 (+7 ms) 2024-12-09T14:26:37,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:26:37,704 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:26:37,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:26:37,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:26:37,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:26:37,704 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:26:37,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754397703Disabling compacts and flushes for region at 
1733754397703Disabling writes for close at 1733754397704 (+1 ms)Writing region close event to WAL at 1733754397704Closed at 1733754397704 2024-12-09T14:26:37,706 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:37,706 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T14:26:37,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T14:26:37,708 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:26:37,710 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T14:26:37,742 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(746): ClusterId : e3af21c7-3712-4977-bd20-0ccd0eabadcd 2024-12-09T14:26:37,742 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:26:37,751 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:26:37,751 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:26:37,754 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:26:37,754 DEBUG [RS:0;f4e784dc7cb5:41507 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1459431e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:26:37,771 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f4e784dc7cb5:41507 2024-12-09T14:26:37,771 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:26:37,771 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:26:37,771 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T14:26:37,773 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,34437,1733754397363 with port=41507, startcode=1733754397420 2024-12-09T14:26:37,773 DEBUG [RS:0;f4e784dc7cb5:41507 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:26:37,775 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40249, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:26:37,776 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34437 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:37,776 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34437 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:37,779 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd 2024-12-09T14:26:37,779 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43639 2024-12-09T14:26:37,779 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:26:37,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:26:37,782 DEBUG [RS:0;f4e784dc7cb5:41507 {}] zookeeper.ZKUtil(111): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:37,782 WARN [RS:0;f4e784dc7cb5:41507 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:26:37,782 INFO [RS:0;f4e784dc7cb5:41507 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:26:37,782 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,41507,1733754397420] 2024-12-09T14:26:37,782 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:37,788 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:26:37,791 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:26:37,792 INFO [RS:0;f4e784dc7cb5:41507 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:26:37,792 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T14:26:37,792 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:26:37,793 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:26:37,794 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,794 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,795 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:37,795 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:26:37,795 DEBUG [RS:0;f4e784dc7cb5:41507 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:26:37,796 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T14:26:37,796 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,796 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,796 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,796 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,796 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,41507,1733754397420-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:26:37,818 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:26:37,819 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,41507,1733754397420-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,819 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:37,819 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.Replication(171): f4e784dc7cb5,41507,1733754397420 started 2024-12-09T14:26:37,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:26:37,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T14:26:37,823 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T14:26:37,840 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:26:37,840 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,41507,1733754397420, RpcServer on f4e784dc7cb5/172.17.0.3:41507, sessionid=0x1012b9432be0001 2024-12-09T14:26:37,840 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:26:37,840 DEBUG [RS:0;f4e784dc7cb5:41507 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:37,840 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,41507,1733754397420' 2024-12-09T14:26:37,840 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:26:37,841 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:26:37,842 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:26:37,842 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:26:37,842 DEBUG [RS:0;f4e784dc7cb5:41507 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:37,842 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,41507,1733754397420' 2024-12-09T14:26:37,842 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:26:37,843 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:26:37,843 DEBUG [RS:0;f4e784dc7cb5:41507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:26:37,843 INFO [RS:0;f4e784dc7cb5:41507 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:26:37,843 INFO [RS:0;f4e784dc7cb5:41507 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T14:26:37,860 WARN [f4e784dc7cb5:34437 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-09T14:26:37,946 INFO [RS:0;f4e784dc7cb5:41507 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C41507%2C1733754397420, suffix=, logDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420, archiveDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs, maxLogs=32 2024-12-09T14:26:37,974 INFO [RS:0;f4e784dc7cb5:41507 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 2024-12-09T14:26:37,982 INFO [RS:0;f4e784dc7cb5:41507 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 2024-12-09T14:26:37,984 DEBUG [RS:0;f4e784dc7cb5:41507 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38455:38455),(127.0.0.1/127.0.0.1:46245:46245)] 2024-12-09T14:26:38,111 DEBUG [f4e784dc7cb5:34437 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T14:26:38,111 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:38,113 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,41507,1733754397420, state=OPENING 2024-12-09T14:26:38,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:38,115 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T14:26:38,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:38,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:26:38,118 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:38,118 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:38,118 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:26:38,118 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,41507,1733754397420}] 2024-12-09T14:26:38,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:38,272 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T14:26:38,274 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37191, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T14:26:38,278 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T14:26:38,279 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:26:38,280 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C41507%2C1733754397420.meta, suffix=.meta, logDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420, archiveDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs, maxLogs=32 2024-12-09T14:26:38,281 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta 2024-12-09T14:26:38,286 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta 2024-12-09T14:26:38,287 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38455:38455),(127.0.0.1/127.0.0.1:46245:46245)] 2024-12-09T14:26:38,290 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:26:38,290 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T14:26:38,290 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T14:26:38,291 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T14:26:38,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T14:26:38,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:38,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T14:26:38,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T14:26:38,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:26:38,293 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:26:38,293 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:38,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:38,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:26:38,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:26:38,295 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:38,295 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:38,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:26:38,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:26:38,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:38,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:26:38,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:26:38,298 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:26:38,298 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:38,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T14:26:38,299 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:26:38,299 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740 2024-12-09T14:26:38,300 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740 2024-12-09T14:26:38,302 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:26:38,302 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:26:38,302 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:26:38,304 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:26:38,305 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818203, jitterRate=0.040399372577667236}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:26:38,305 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T14:26:38,306 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733754398291Writing region info on filesystem at 1733754398291Initializing all the Stores at 1733754398292 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754398292Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754398292Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754398292Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754398292Cleaning up temporary data from old regions at 1733754398302 (+10 ms)Running coprocessor post-open hooks at 1733754398305 (+3 ms)Region opened successfully at 1733754398306 (+1 ms) 2024-12-09T14:26:38,307 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733754398272 2024-12-09T14:26:38,311 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T14:26:38,311 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T14:26:38,312 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:38,313 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,41507,1733754397420, state=OPEN 2024-12-09T14:26:38,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:26:38,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:26:38,319 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:38,319 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:26:38,319 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:38,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T14:26:38,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,41507,1733754397420 in 201 msec 2024-12-09T14:26:38,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T14:26:38,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-12-09T14:26:38,327 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:26:38,327 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T14:26:38,329 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:26:38,329 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,41507,1733754397420, seqNum=-1] 2024-12-09T14:26:38,330 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:26:38,331 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35301, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:26:38,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 695 msec 2024-12-09T14:26:38,338 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733754398338, completionTime=-1 2024-12-09T14:26:38,338 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T14:26:38,338 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T14:26:38,340 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T14:26:38,340 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733754458340 2024-12-09T14:26:38,340 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733754518340 2024-12-09T14:26:38,340 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T14:26:38,341 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34437,1733754397363-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,341 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34437,1733754397363-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,341 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34437,1733754397363-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,341 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f4e784dc7cb5:34437, period=300000, unit=MILLISECONDS is enabled. 
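The FlushLargeStoresPolicy line during the meta open above falls back to region.getMemStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor. With the four families opened there (info, ns, rep_barrier, table) and the 64 MB flush heap size those numbers imply, the fallback is the 16 MB lower bound (flushSizeLowerBound=16777216) reported when the region opened. A quick check of that arithmetic, with the 64 MB flush size treated as an assumption read off the log rather than from configuration:

public class MetaFlushLowerBound {
  public static void main(String[] args) {
    long memstoreFlushSize = 64L * 1024 * 1024; // assumed flush heap size for hbase:meta, implied by "16.0 M" above
    int familyCount = 4;                        // info, ns, rep_barrier, table
    // Prints 16777216, matching FlushLargeStoresPolicy{flushSizeLowerBound=16777216}.
    System.out.println(memstoreFlushSize / familyCount);
  }
}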
2024-12-09T14:26:38,341 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,341 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,343 DEBUG [master/f4e784dc7cb5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.882sec 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34437,1733754397363-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:26:38,345 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34437,1733754397363-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T14:26:38,347 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T14:26:38,347 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T14:26:38,348 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34437,1733754397363-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:26:38,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2efeeef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:26:38,441 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f4e784dc7cb5,34437,-1 for getting cluster id 2024-12-09T14:26:38,441 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T14:26:38,443 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e3af21c7-3712-4977-bd20-0ccd0eabadcd' 2024-12-09T14:26:38,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T14:26:38,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e3af21c7-3712-4977-bd20-0ccd0eabadcd" 2024-12-09T14:26:38,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d65fa4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:26:38,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f4e784dc7cb5,34437,-1] 2024-12-09T14:26:38,444 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T14:26:38,445 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:26:38,446 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50948, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T14:26:38,447 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d1c4f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:26:38,447 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:26:38,448 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,41507,1733754397420, seqNum=-1] 2024-12-09T14:26:38,449 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:26:38,450 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57988, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:26:38,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:38,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:38,455 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T14:26:38,471 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:26:38,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:38,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:38,472 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:26:38,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:26:38,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:26:38,472 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:26:38,472 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:26:38,473 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41215 2024-12-09T14:26:38,474 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41215 connecting to ZooKeeper ensemble=127.0.0.1:54193 2024-12-09T14:26:38,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:38,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:26:38,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412150x0, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:26:38,483 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41215-0x1012b9432be0002 connected 2024-12-09T14:26:38,483 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-09T14:26:38,483 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-09T14:26:38,484 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T14:26:38,484 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
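Shortly before the second region server is constructed, the master logs set balanceSwitch=false for the test client (the MasterRpcServices line above). A hedged client-side equivalent using the public Admin API; the connection bootstrap here is an assumption, since the test reuses its mini-cluster connection rather than an external quorum:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerOff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum for the sketch
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Should surface as a "set balanceSwitch=false" line on the active master.
      boolean previous = admin.balancerSwitch(false, true);
      System.out.println("balancer was previously " + (previous ? "on" : "off"));
    }
  }
}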
2024-12-09T14:26:38,485 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T14:26:38,487 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:26:38,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41215 2024-12-09T14:26:38,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41215 2024-12-09T14:26:38,491 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41215 2024-12-09T14:26:38,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41215 2024-12-09T14:26:38,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41215 2024-12-09T14:26:38,493 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(746): ClusterId : e3af21c7-3712-4977-bd20-0ccd0eabadcd 2024-12-09T14:26:38,493 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:26:38,495 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:26:38,495 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:26:38,497 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:26:38,498 DEBUG [RS:1;f4e784dc7cb5:41215 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c027df9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:26:38,511 DEBUG [RS:1;f4e784dc7cb5:41215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;f4e784dc7cb5:41215 2024-12-09T14:26:38,511 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:26:38,511 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:26:38,511 DEBUG [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T14:26:38,511 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,34437,1733754397363 with port=41215, startcode=1733754398471 2024-12-09T14:26:38,512 DEBUG [RS:1;f4e784dc7cb5:41215 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:26:38,513 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39373, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:26:38,514 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34437 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,41215,1733754398471 2024-12-09T14:26:38,514 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34437 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,41215,1733754398471 2024-12-09T14:26:38,516 DEBUG [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd 2024-12-09T14:26:38,516 DEBUG [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43639 2024-12-09T14:26:38,516 DEBUG [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:26:38,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:26:38,518 DEBUG [RS:1;f4e784dc7cb5:41215 {}] zookeeper.ZKUtil(111): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,41215,1733754398471 2024-12-09T14:26:38,518 WARN [RS:1;f4e784dc7cb5:41215 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:26:38,518 INFO [RS:1;f4e784dc7cb5:41215 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:26:38,518 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,41215,1733754398471] 2024-12-09T14:26:38,518 DEBUG [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471 2024-12-09T14:26:38,522 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:26:38,524 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:26:38,524 INFO [RS:1;f4e784dc7cb5:41215 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:26:38,524 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
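RS:1 instantiates a WALProvider of type FSHLogProvider above. Whether that comes from the default or from an explicit test setting is not visible in this log; if it were configured by hand, the relevant key would be hbase.wal.provider, as in this small sketch (key name assumed from the standard configuration, not taken from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderChoice {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the classic FSHLog-based provider seen in the log;
    // "asyncfs" would select the asynchronous WAL provider instead.
    conf.set("hbase.wal.provider", "filesystem");
    System.out.println(conf.get("hbase.wal.provider"));
  }
}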
2024-12-09T14:26:38,524 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:26:38,525 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:26:38,525 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,525 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,525 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,525 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:26:38,526 DEBUG [RS:1;f4e784dc7cb5:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:26:38,526 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T14:26:38,527 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,527 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,527 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,527 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,527 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,41215,1733754398471-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:26:38,541 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:26:38,542 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,41215,1733754398471-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,542 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,542 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.Replication(171): f4e784dc7cb5,41215,1733754398471 started 2024-12-09T14:26:38,556 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:26:38,556 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,41215,1733754398471, RpcServer on f4e784dc7cb5/172.17.0.3:41215, sessionid=0x1012b9432be0002 2024-12-09T14:26:38,557 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:26:38,557 DEBUG [RS:1;f4e784dc7cb5:41215 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,41215,1733754398471 2024-12-09T14:26:38,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;f4e784dc7cb5:41215,5,FailOnTimeoutGroup] 2024-12-09T14:26:38,557 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,41215,1733754398471' 2024-12-09T14:26:38,557 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:26:38,557 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-09T14:26:38,557 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:26:38,557 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T14:26:38,558 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:26:38,558 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:26:38,558 DEBUG [RS:1;f4e784dc7cb5:41215 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
f4e784dc7cb5,41215,1733754398471 2024-12-09T14:26:38,558 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,41215,1733754398471' 2024-12-09T14:26:38,558 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:26:38,559 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:26:38,559 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is f4e784dc7cb5,34437,1733754397363 2024-12-09T14:26:38,559 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@cb496b3 2024-12-09T14:26:38,559 DEBUG [RS:1;f4e784dc7cb5:41215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:26:38,559 INFO [RS:1;f4e784dc7cb5:41215 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:26:38,559 INFO [RS:1;f4e784dc7cb5:41215 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T14:26:38,559 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T14:26:38,562 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T14:26:38,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34437 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T14:26:38,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34437 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
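The two TableDescriptorChecker warnings above fire because the create request that follows sets MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, well below the checker's sanity thresholds. A sketch of a client-side call that would trip the same warnings; the builder chain is the standard public API, while the connection setup is assumed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateSmallTable {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)      // tiny MAX_FILESIZE, forces frequent splits
          .setMemStoreFlushSize(8192L)  // tiny MEMSTORE_FLUSHSIZE, forces frequent flushes
          .build();
      admin.createTable(td);
    }
  }
}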
2024-12-09T14:26:38,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34437 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:26:38,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34437 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T14:26:38,567 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T14:26:38,567 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:38,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34437 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-09T14:26:38,568 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T14:26:38,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34437 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:26:38,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741835_1011 (size=393) 2024-12-09T14:26:38,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741835_1011 (size=393) 2024-12-09T14:26:38,586 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2cb998fa9bbd6a409dda91836dbca4ce, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd 2024-12-09T14:26:38,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39239 is added to blk_1073741836_1012 (size=76) 2024-12-09T14:26:38,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38909 is added to blk_1073741836_1012 (size=76) 2024-12-09T14:26:38,599 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:38,599 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 2cb998fa9bbd6a409dda91836dbca4ce, disabling compactions & flushes 2024-12-09T14:26:38,599 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:38,599 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:38,599 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. after waiting 0 ms 2024-12-09T14:26:38,599 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:38,599 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:38,599 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2cb998fa9bbd6a409dda91836dbca4ce: Waiting for close lock at 1733754398599Disabling compacts and flushes for region at 1733754398599Disabling writes for close at 1733754398599Writing region close event to WAL at 1733754398599Closed at 1733754398599 2024-12-09T14:26:38,601 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T14:26:38,601 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733754398601"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733754398601"}]},"ts":"1733754398601"} 2024-12-09T14:26:38,604 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
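The Put logged above writes info:regioninfo and info:state for row TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. into hbase:meta. A small sketch of reading such region rows back through the ordinary client API (connection setup assumed; the test itself goes through ClientMetaTableAccessor, as seen further down):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaForTable {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      // Region rows are keyed "<table>,<start key>,<timestamp>.<encoded name>."
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("TestLogRolling-testLogRollOnDatanodeDeath,"))
          .addFamily(Bytes.toBytes("info"))
          .setLimit(5); // keep the sketch bounded; a real caller would use a stop row
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toStringBinary(r.getRow()));
        }
      }
    }
  }
}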
2024-12-09T14:26:38,605 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T14:26:38,605 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754398605"}]},"ts":"1733754398605"} 2024-12-09T14:26:38,607 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-09T14:26:38,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2cb998fa9bbd6a409dda91836dbca4ce, ASSIGN}] 2024-12-09T14:26:38,609 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2cb998fa9bbd6a409dda91836dbca4ce, ASSIGN 2024-12-09T14:26:38,610 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2cb998fa9bbd6a409dda91836dbca4ce, ASSIGN; state=OFFLINE, location=f4e784dc7cb5,41507,1733754397420; forceNewPlan=false, retain=false 2024-12-09T14:26:38,635 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:26:38,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:38,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:38,661 INFO [RS:1;f4e784dc7cb5:41215 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C41215%2C1733754398471, suffix=, logDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471, archiveDir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs, maxLogs=32 2024-12-09T14:26:38,662 INFO [RS:1;f4e784dc7cb5:41215 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 2024-12-09T14:26:38,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:38,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:38,671 INFO [RS:1;f4e784dc7cb5:41215 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 2024-12-09T14:26:38,674 DEBUG [RS:1;f4e784dc7cb5:41215 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46245:46245),(127.0.0.1/127.0.0.1:38455:38455)] 2024-12-09T14:26:38,761 INFO [f4e784dc7cb5:34437 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T14:26:38,762 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2cb998fa9bbd6a409dda91836dbca4ce, regionState=OPENING, regionLocation=f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:38,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2cb998fa9bbd6a409dda91836dbca4ce, ASSIGN because future has completed 2024-12-09T14:26:38,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cb998fa9bbd6a409dda91836dbca4ce, server=f4e784dc7cb5,41507,1733754397420}] 2024-12-09T14:26:38,922 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:38,923 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2cb998fa9bbd6a409dda91836dbca4ce, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:26:38,923 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,923 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:26:38,923 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,923 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,925 INFO [StoreOpener-2cb998fa9bbd6a409dda91836dbca4ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,926 INFO 
[StoreOpener-2cb998fa9bbd6a409dda91836dbca4ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cb998fa9bbd6a409dda91836dbca4ce columnFamilyName info 2024-12-09T14:26:38,926 DEBUG [StoreOpener-2cb998fa9bbd6a409dda91836dbca4ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:26:38,927 INFO [StoreOpener-2cb998fa9bbd6a409dda91836dbca4ce-1 {}] regionserver.HStore(327): Store=2cb998fa9bbd6a409dda91836dbca4ce/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:26:38,927 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,928 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,928 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,928 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,928 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,930 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,932 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:26:38,932 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2cb998fa9bbd6a409dda91836dbca4ce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792610, jitterRate=0.007856622338294983}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T14:26:38,932 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:38,933 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2cb998fa9bbd6a409dda91836dbca4ce: Running coprocessor pre-open hook at 1733754398924Writing region info on filesystem at 1733754398924Initializing all the Stores at 1733754398924Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754398924Cleaning up temporary data from old regions at 1733754398928 (+4 ms)Running coprocessor post-open hooks at 1733754398932 (+4 ms)Region opened successfully at 1733754398933 (+1 ms) 2024-12-09T14:26:38,934 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce., pid=6, masterSystemTime=1733754398918 2024-12-09T14:26:38,937 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:38,937 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 
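This region opens with ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792610, jitterRate=0.007856...}, and the meta region earlier showed desiredMaxFileSize=818203 with jitterRate=0.0404. Both are consistent with the test's hbase.hregion.max.filesize of 786432 plus the reported jitter, i.e. desiredMaxFileSize = maxFileSize * (1 + jitterRate). A quick check of that relationship, which is inferred from the two log lines rather than quoted from the split-policy source:

public class SplitJitterCheck {
  public static void main(String[] args) {
    long maxFileSize = 786432L; // hbase.hregion.max.filesize used by this test
    double[] jitterRates = {0.007856622338294983, 0.040399372577667236};
    for (double jitter : jitterRates) {
      // Prints 792610 and 818203, matching the desiredMaxFileSize values in the log.
      System.out.println((long) (maxFileSize * (1 + jitter)));
    }
  }
}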
2024-12-09T14:26:38,938 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2cb998fa9bbd6a409dda91836dbca4ce, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,41507,1733754397420 2024-12-09T14:26:38,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cb998fa9bbd6a409dda91836dbca4ce, server=f4e784dc7cb5,41507,1733754397420 because future has completed 2024-12-09T14:26:38,944 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T14:26:38,944 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2cb998fa9bbd6a409dda91836dbca4ce, server=f4e784dc7cb5,41507,1733754397420 in 177 msec 2024-12-09T14:26:38,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T14:26:38,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2cb998fa9bbd6a409dda91836dbca4ce, ASSIGN in 336 msec 2024-12-09T14:26:38,948 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T14:26:38,949 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754398948"}]},"ts":"1733754398948"} 2024-12-09T14:26:38,951 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-09T14:26:38,952 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T14:26:38,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 389 msec 2024-12-09T14:26:43,793 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:26:43,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:43,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:43,820 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:43,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:26:43,831 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-09T14:26:47,821 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T14:26:47,822 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T14:26:47,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T14:26:47,822 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-09T14:26:47,823 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:26:47,823 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T14:26:48,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34437 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:26:48,608 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-09T14:26:48,609 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-09T14:26:48,612 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T14:26:48,613 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:48,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:48,633 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:48,634 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:48,634 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:48,634 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:26:48,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e308c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:48,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ddd8f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:48,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ef889f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir/jetty-localhost-43309-hadoop-hdfs-3_4_1-tests_jar-_-any-3733176115230991034/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:48,754 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51f59516{HTTP/1.1, (http/1.1)}{localhost:43309} 2024-12-09T14:26:48,754 INFO [Time-limited test {}] server.Server(415): Started @116612ms 2024-12-09T14:26:48,755 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:26:48,795 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:48,799 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:48,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:48,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:48,800 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:26:48,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2281f2b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:48,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@83c4e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:48,859 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data6/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:48,859 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data5/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:48,880 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:26:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab588547bb090dd2 with lease ID 0xcc254b5549ab9874: Processing first storage report for DS-7222474e-e52d-498c-8d10-fe75c711b595 from datanode DatanodeRegistration(127.0.0.1:32959, datanodeUuid=5fd1f439-9622-4b64-8599-8ba27b8bbc66, infoPort=33447, infoSecurePort=0, ipcPort=40077, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:48,886 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab588547bb090dd2 with lease ID 0xcc254b5549ab9874: from storage DS-7222474e-e52d-498c-8d10-fe75c711b595 node DatanodeRegistration(127.0.0.1:32959, datanodeUuid=5fd1f439-9622-4b64-8599-8ba27b8bbc66, infoPort=33447, infoSecurePort=0, ipcPort=40077, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T14:26:48,886 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab588547bb090dd2 with lease ID 0xcc254b5549ab9874: Processing first storage report for DS-8f333ea0-cbeb-497d-8f80-067f81aba27f from datanode DatanodeRegistration(127.0.0.1:32959, datanodeUuid=5fd1f439-9622-4b64-8599-8ba27b8bbc66, infoPort=33447, infoSecurePort=0, ipcPort=40077, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:48,886 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab588547bb090dd2 with lease ID 0xcc254b5549ab9874: from storage DS-8f333ea0-cbeb-497d-8f80-067f81aba27f node DatanodeRegistration(127.0.0.1:32959, datanodeUuid=5fd1f439-9622-4b64-8599-8ba27b8bbc66, infoPort=33447, infoSecurePort=0, ipcPort=40077, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:48,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58872ac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir/jetty-localhost-40853-hadoop-hdfs-3_4_1-tests_jar-_-any-7921034117232858586/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:48,941 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3afd309b{HTTP/1.1, (http/1.1)}{localhost:40853} 2024-12-09T14:26:48,941 INFO [Time-limited test {}] server.Server(415): Started @116799ms 2024-12-09T14:26:48,942 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:26:49,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:26:49,004 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:26:49,009 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:26:49,009 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:26:49,010 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:26:49,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719bbb9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:26:49,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc0bdb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:26:49,056 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:49,056 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:49,080 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:26:49,083 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x886aabdb8124e112 with lease ID 0xcc254b5549ab9875: Processing first storage report for DS-2bd456eb-2987-497c-8905-4673ca0fc0b3 from datanode DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:49,083 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x886aabdb8124e112 with lease ID 0xcc254b5549ab9875: from storage DS-2bd456eb-2987-497c-8905-4673ca0fc0b3 node DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:49,083 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x886aabdb8124e112 with lease ID 0xcc254b5549ab9875: Processing first storage report for DS-b362244f-6c7e-446d-844b-e06d6fcbed84 from datanode DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:49,083 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x886aabdb8124e112 with lease ID 0xcc254b5549ab9875: from storage DS-b362244f-6c7e-446d-844b-e06d6fcbed84 node DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:49,145 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5547c7e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir/jetty-localhost-35223-hadoop-hdfs-3_4_1-tests_jar-_-any-3392030194672263237/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:49,146 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78e445ac{HTTP/1.1, (http/1.1)}{localhost:35223} 2024-12-09T14:26:49,146 INFO [Time-limited test {}] server.Server(415): Started @117003ms 2024-12-09T14:26:49,147 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T14:26:49,245 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data9/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:49,245 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data10/current/BP-1955786853-172.17.0.3-1733754396514/current, will proceed with Du for space computation calculation, 2024-12-09T14:26:49,269 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:26:49,271 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf160de0dadbff27c with lease ID 0xcc254b5549ab9876: Processing first storage report for DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35 from datanode DatanodeRegistration(127.0.0.1:40341, datanodeUuid=7ea7564b-5716-425e-9aec-33a12de3d648, infoPort=34729, infoSecurePort=0, ipcPort=34385, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:49,271 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf160de0dadbff27c with lease ID 0xcc254b5549ab9876: from storage DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35 node DatanodeRegistration(127.0.0.1:40341, datanodeUuid=7ea7564b-5716-425e-9aec-33a12de3d648, infoPort=34729, infoSecurePort=0, ipcPort=34385, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:49,271 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf160de0dadbff27c with lease ID 0xcc254b5549ab9876: Processing first storage report for DS-90d3fb51-c02d-4703-bd0f-25f4a35d2a2a from datanode DatanodeRegistration(127.0.0.1:40341, datanodeUuid=7ea7564b-5716-425e-9aec-33a12de3d648, infoPort=34729, infoSecurePort=0, ipcPort=34385, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514) 2024-12-09T14:26:49,271 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf160de0dadbff27c with lease ID 0xcc254b5549ab9876: from storage DS-90d3fb51-c02d-4703-bd0f-25f4a35d2a2a node DatanodeRegistration(127.0.0.1:40341, datanodeUuid=7ea7564b-5716-425e-9aec-33a12de3d648, infoPort=34729, infoSecurePort=0, ipcPort=34385, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:26:49,370 WARN [ResponseProcessor for block BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,370 WARN [ResponseProcessor for block BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,370 WARN [ResponseProcessor for block BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,371 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:49,371 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:49,371 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 
2024-12-09T14:26:49,371 WARN [PacketResponder: BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39239] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,371 WARN [ResponseProcessor for block BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,372 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta block BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:49,372 WARN [PacketResponder: BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39239] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,372 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:42276 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42276 dst: /127.0.0.1:38909 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,372 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893678762_22 at /127.0.0.1:50344 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50344 dst: /127.0.0.1:39239 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:42280 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42280 dst: /127.0.0.1:38909 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:42244 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42244 dst: /127.0.0.1:38909 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:44620 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44620 dst: /127.0.0.1:39239 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@229a8eec{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:49,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:44626 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44626 dst: /127.0.0.1:39239 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893678762_22 at /127.0.0.1:58824 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58824 dst: /127.0.0.1:38909 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,374 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7720beab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:49,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:44584 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44584 dst: /127.0.0.1:39239 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:49,374 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:49,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66182b08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:49,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7982676d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:49,378 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:49,378 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1955786853-172.17.0.3-1733754396514 (Datanode Uuid e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9) service to localhost/127.0.0.1:43639 2024-12-09T14:26:49,378 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data3/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:49,379 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data4/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:49,379 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:26:49,379 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:49,379 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:49,380 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,380 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:49,380 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta block BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,380 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:49,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67cf8368{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:49,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30e7c448{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:49,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:49,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65dec1b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:49,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7183cb8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:49,384 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:49,384 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1955786853-172.17.0.3-1733754396514 (Datanode Uuid 4a0377c6-52d3-4ff5-9888-ab20d26e2836) service to localhost/127.0.0.1:43639 2024-12-09T14:26:49,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:26:49,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:49,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data1/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:49,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data2/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:49,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:49,389 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce., hostname=f4e784dc7cb5,41507,1733754397420, seqNum=2] 2024-12-09T14:26:49,390 ERROR [FSHLog-0-hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd-prefix:f4e784dc7cb5,41507,1733754397420 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,390 WARN [FSHLog-0-hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd-prefix:f4e784dc7cb5,41507,1733754397420 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:49,391 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,391 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C41507%2C1733754397420:(num 1733754397974) roll requested 2024-12-09T14:26:49,391 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 2024-12-09T14:26:49,397 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:49,397 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:49,397 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:49,397 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:49,397 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:49,397 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 2024-12-09T14:26:49,398 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,398 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:49,399 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-09T14:26:49,399 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-09T14:26:49,399 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 2024-12-09T14:26:49,399 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33447:33447),(127.0.0.1/127.0.0.1:34729:34729)] 2024-12-09T14:26:49,399 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 is not closed yet, will try archiving it next time 2024-12-09T14:26:49,402 WARN [IPC Server handler 1 on default port 43639 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-09T14:26:49,406 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 after 5ms 2024-12-09T14:26:49,511 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:50,527 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:51,400 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:51,401 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 2024-12-09T14:26:51,402 WARN [ResponseProcessor for block BP-1955786853-172.17.0.3-1733754396514:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1955786853-172.17.0.3-1733754396514:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:51,402 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:26:51,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:51150 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:32959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51150 dst: /127.0.0.1:32959 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:51,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:49412 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:40341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49412 dst: /127.0.0.1:40341 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
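The RecoverLeaseFSUtils entries above (recover lease on the abandoned WAL, "Lease recovery is in progress", failed attempt=0 after 5ms) and the retry a few seconds later show the standard poll-until-closed loop against the NameNode, using the org.apache.hadoop.fs.LeaseRecoverable.recoverLease() method the log names. A rough sketch of that loop, with illustrative timings and a hypothetical helper name rather than HBase's actual implementation:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LeaseRecoverable;
import org.apache.hadoop.fs.Path;

// Sketch of the retry loop RecoverLeaseFSUtils performs above: keep asking the
// NameNode to recover the lease on the abandoned WAL until it reports the file closed.
public final class WalLeaseRecoverySketch {
  public static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof LeaseRecoverable)) {
      return true; // local file systems have no lease to recover
    }
    LeaseRecoverable recoverable = (LeaseRecoverable) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the NameNode has closed the file.
      if (recoverable.recoverLease(wal)) {
        return true;
      }
      attempt++;
      // The log above shows a short pause before the next attempt (~4s), so back off.
      Thread.sleep(Math.min(4000L, 1000L * attempt));
    }
    return false;
  }

  private WalLeaseRecoverySketch() {}
}
```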
2024-12-09T14:26:51,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ef889f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:51,405 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51f59516{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:51,405 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:51,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ddd8f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:51,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e308c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:51,407 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:51,407 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1955786853-172.17.0.3-1733754396514 (Datanode Uuid 5fd1f439-9622-4b64-8599-8ba27b8bbc66) service to localhost/127.0.0.1:43639 2024-12-09T14:26:51,407 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data5/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:51,408 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data6/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:51,408 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:51,408 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T14:26:51,408 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:51,512 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:52,527 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:53,400 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:53,401 WARN [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]] 2024-12-09T14:26:53,401 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C41507%2C1733754397420:(num 1733754409391) roll requested 2024-12-09T14:26:53,401 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.1733754413401 2024-12-09T14:26:53,404 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:53,404 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:26:53,404 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741839_1021 2024-12-09T14:26:53,407 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 after 4008ms 2024-12-09T14:26:53,407 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:53,410 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:53,410 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 2024-12-09T14:26:53,410 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741840_1022 2024-12-09T14:26:53,411 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:53,412 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:53,412 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:53,412 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741841_1023 2024-12-09T14:26:53,412 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:53,415 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T14:26:53,417 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:53,417 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:53,417 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:53,417 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:53,417 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:53,418 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754413401 2024-12-09T14:26:53,418 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34729:34729),(127.0.0.1/127.0.0.1:34253:34253)] 2024-12-09T14:26:53,418 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 is not closed yet, will try archiving it next time 2024-12-09T14:26:53,418 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 is not closed yet, will try archiving it next time 2024-12-09T14:26:53,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40341 is added to blk_1073741838_1020 (size=3600) 2024-12-09T14:26:53,512 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:53,820 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 is not closed yet, will try archiving it next time 2024-12-09T14:26:54,528 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:55,282 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68b08b5e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40341, datanodeUuid=7ea7564b-5716-425e-9aec-33a12de3d648, infoPort=34729, infoSecurePort=0, ipcPort=34385, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741838_1020 to 127.0.0.1:32959 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:55,419 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:55,420 WARN [ResponseProcessor for block BP-1955786853-172.17.0.3-1733754396514:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1955786853-172.17.0.3-1733754396514:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:55,420 WARN [DataStreamer for file /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754413401 block BP-1955786853-172.17.0.3-1733754396514:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:55,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:49420 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40341:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49420 dst: /127.0.0.1:40341 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:55,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41094 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41094 dst: /127.0.0.1:33181 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:26:55,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5547c7e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:26:55,423 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78e445ac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:26:55,424 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:26:55,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc0bdb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:26:55,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719bbb9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,STOPPED} 2024-12-09T14:26:55,427 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:26:55,427 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T14:26:55,428 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1955786853-172.17.0.3-1733754396514 (Datanode Uuid 7ea7564b-5716-425e-9aec-33a12de3d648) service to localhost/127.0.0.1:43639 2024-12-09T14:26:55,428 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:26:55,428 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data9/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:55,429 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data10/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:26:55,429 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:26:55,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41507 {}] regionserver.HRegion(8855): Flush requested on 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:55,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2cb998fa9bbd6a409dda91836dbca4ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:26:55,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/3304f2e9ad214f2592304a75db05ac71 is 1080, key is row0002/info:/1733754411409/Put/seqid=0 2024-12-09T14:26:55,480 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:55,481 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:55,481 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741843_1026 2024-12-09T14:26:55,482 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:26:55,483 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:55,484 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 
2024-12-09T14:26:55,484 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741844_1027 2024-12-09T14:26:55,485 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:55,488 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38909 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:55,488 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41118 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741845_1028 to mirror 127.0.0.1:38909 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:55,488 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 
2024-12-09T14:26:55,488 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741845_1028 2024-12-09T14:26:55,488 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41118 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:26:55,488 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41118 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41118 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:55,489 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:55,492 WARN [Thread-918 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:55,492 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41134 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741846_1029 to mirror 127.0.0.1:39239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:55,493 WARN [Thread-918 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:55,493 WARN [Thread-918 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741846_1029 2024-12-09T14:26:55,493 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41134 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:26:55,493 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41134 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41134 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:55,493 WARN [Thread-918 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:55,494 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:26:55,494 WARN [IPC Server handler 1 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:26:55,495 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:26:55,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741847_1030 (size=10347) 2024-12-09T14:26:55,512 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
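The NameNode warnings just above ("Failed to place enough replicas, still in need of 1 to reach 2") reflect that most of the cluster's datanodes have been stopped by this point in the test. As an illustrative check only (not something this test performs), one way to confirm how many datanodes remain live before expecting two-replica placement to succeed:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

// Illustrative check: count live datanodes to explain NameNode warnings like
// "Failed to place enough replicas, still in need of 1 to reach 2".
public final class LiveDatanodeCheck {
  public static int liveDatanodes(FileSystem fs) throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return -1; // not an HDFS file system
    }
    DatanodeInfo[] live =
        ((DistributedFileSystem) fs).getDataNodeStats(DatanodeReportType.LIVE);
    return live.length;
  }

  private LiveDatanodeCheck() {}
}
```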
2024-12-09T14:26:55,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/3304f2e9ad214f2592304a75db05ac71 2024-12-09T14:26:55,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/3304f2e9ad214f2592304a75db05ac71 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3304f2e9ad214f2592304a75db05ac71 2024-12-09T14:26:55,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3304f2e9ad214f2592304a75db05ac71, entries=5, sequenceid=11, filesize=10.1 K 2024-12-09T14:26:55,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 2cb998fa9bbd6a409dda91836dbca4ce in 478ms, sequenceid=11, compaction requested=false 2024-12-09T14:26:55,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:26:56,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41507 {}] regionserver.HRegion(8855): Flush requested on 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:56,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2cb998fa9bbd6a409dda91836dbca4ce 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-09T14:26:56,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/9211efda7b0f47ef94071250256af66b is 1080, key is row0007/info:/1733754415447/Put/seqid=0 2024-12-09T14:26:56,089 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
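The MemStoreFlusher entries above record a memstore flush being written to a .tmp HFile and committed into the store despite the degraded pipeline. A sketch of forcing that kind of flush from client code; the table name matches the test table in the log, while the class and surrounding setup are illustrative assumptions:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of requesting the kind of memstore flush recorded above from client code.
public final class ForceFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the region server(s) hosting the table to flush their memstores to HFiles,
      // the same path the MemStoreFlusher/HStore entries above record.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}
```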
2024-12-09T14:26:56,090 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:56,090 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741848_1031 2024-12-09T14:26:56,090 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:26:56,092 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:56,092 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:26:56,092 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741849_1032 2024-12-09T14:26:56,093 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:56,094 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:56,094 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:56,094 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741850_1033 2024-12-09T14:26:56,095 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:56,096 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:56,096 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 
2024-12-09T14:26:56,096 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741851_1034 2024-12-09T14:26:56,096 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:56,097 WARN [IPC Server handler 4 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:26:56,097 WARN [IPC Server handler 4 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:26:56,097 WARN [IPC Server handler 4 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:26:56,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741852_1035 (size=12506) 2024-12-09T14:26:56,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/9211efda7b0f47ef94071250256af66b 2024-12-09T14:26:56,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/9211efda7b0f47ef94071250256af66b as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b 2024-12-09T14:26:56,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b, entries=7, sequenceid=24, filesize=12.2 K 2024-12-09T14:26:56,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 2cb998fa9bbd6a409dda91836dbca4ce in 432ms, sequenceid=24, compaction requested=false 2024-12-09T14:26:56,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:26:56,515 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-09T14:26:56,515 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:56,515 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b because midkey is the same as first or last row 2024-12-09T14:26:56,528 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,419 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,419 WARN [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]] 2024-12-09T14:26:57,419 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C41507%2C1733754397420:(num 1733754413401) roll requested 2024-12-09T14:26:57,420 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.1733754417420 2024-12-09T14:26:57,424 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,424 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:57,424 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741853_1036 2024-12-09T14:26:57,424 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:26:57,426 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,426 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 2024-12-09T14:26:57,426 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741854_1037 2024-12-09T14:26:57,427 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:57,428 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,428 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:26:57,428 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741855_1038 2024-12-09T14:26:57,429 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:57,430 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,430 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 
2024-12-09T14:26:57,430 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741856_1039 2024-12-09T14:26:57,431 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:57,431 WARN [IPC Server handler 3 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:26:57,432 WARN [IPC Server handler 3 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:26:57,432 WARN [IPC Server handler 3 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:26:57,435 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:57,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:57,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:57,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:57,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:57,435 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754413401 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754417420 2024-12-09T14:26:57,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741842_1025 (size=24823) 2024-12-09T14:26:57,438 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34253:34253)] 2024-12-09T14:26:57,438 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 is not closed yet, will try archiving it next time 2024-12-09T14:26:57,438 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754413401 is not closed yet, 
will try archiving it next time 2024-12-09T14:26:57,439 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs/f4e784dc7cb5%2C41507%2C1733754397420.1733754409391 2024-12-09T14:26:57,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41507 {}] regionserver.HRegion(8855): Flush requested on 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:57,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2cb998fa9bbd6a409dda91836dbca4ce 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T14:26:57,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/b7d9b186844c4b029e88543e5f6b9437 is 1079, key is tmprow/info:/1733754417502/Put/seqid=0 2024-12-09T14:26:57,511 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38909 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,511 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41150 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741858_1041 to mirror 127.0.0.1:38909 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:57,511 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 2024-12-09T14:26:57,511 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741858_1041 2024-12-09T14:26:57,511 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41150 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:26:57,511 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:41150 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41150 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:57,512 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:57,513 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:57,838 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 is not closed yet, will try archiving it next time 2024-12-09T14:26:58,085 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cd7c00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741852_1035 to 127.0.0.1:39239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:58,514 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:58,515 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 
2024-12-09T14:26:58,515 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741859_1042 2024-12-09T14:26:58,516 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:58,519 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32959 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:58,519 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52334 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=1}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741860_1043 to mirror 127.0.0.1:32959 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:58,519 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 
2024-12-09T14:26:58,519 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741860_1043 2024-12-09T14:26:58,519 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52334 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:26:58,519 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52334 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52334 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:58,520 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:58,522 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:58,522 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52336 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=1}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741861_1044 to mirror 127.0.0.1:40341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:58,522 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:58,522 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741861_1044 2024-12-09T14:26:58,522 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52336 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:26:58,522 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52336 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52336 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:58,523 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:26:58,523 WARN [IPC Server handler 0 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:26:58,523 WARN [IPC Server handler 0 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:26:58,524 WARN [IPC Server handler 0 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:26:58,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741862_1045 (size=6027) 2024-12-09T14:26:58,529 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:58,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/b7d9b186844c4b029e88543e5f6b9437 2024-12-09T14:26:58,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/b7d9b186844c4b029e88543e5f6b9437 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/b7d9b186844c4b029e88543e5f6b9437 2024-12-09T14:26:58,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/b7d9b186844c4b029e88543e5f6b9437, entries=1, sequenceid=34, filesize=5.9 K 2024-12-09T14:26:58,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=7.35 KB/7525 for 2cb998fa9bbd6a409dda91836dbca4ce in 1438ms, sequenceid=34, compaction requested=true 2024-12-09T14:26:58,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:26:58,941 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-09T14:26:58,941 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:58,942 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b because midkey is the same as first or last row 2024-12-09T14:26:58,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2cb998fa9bbd6a409dda91836dbca4ce:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:26:58,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:26:58,942 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:26:58,943 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:26:58,943 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HStore(1541): 2cb998fa9bbd6a409dda91836dbca4ce/info is initiating minor compaction (all files) 2024-12-09T14:26:58,943 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
2cb998fa9bbd6a409dda91836dbca4ce/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:26:58,944 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3304f2e9ad214f2592304a75db05ac71, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/b7d9b186844c4b029e88543e5f6b9437] into tmpdir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp, totalSize=28.2 K 2024-12-09T14:26:58,944 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3304f2e9ad214f2592304a75db05ac71, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733754411409 2024-12-09T14:26:58,945 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9211efda7b0f47ef94071250256af66b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733754415447 2024-12-09T14:26:58,945 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7d9b186844c4b029e88543e5f6b9437, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733754417502 2024-12-09T14:26:58,959 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2cb998fa9bbd6a409dda91836dbca4ce#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:26:58,959 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/451608a3efa44d3f996b086a0ed4c1a2 is 1080, key is row0002/info:/1733754411409/Put/seqid=0 2024-12-09T14:26:58,961 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:58,961 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:58,961 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741863_1046 2024-12-09T14:26:58,962 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:26:58,963 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:58,963 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:58,963 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741864_1047 2024-12-09T14:26:58,964 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:58,965 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:58,965 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:26:58,965 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741865_1048 2024-12-09T14:26:58,965 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:58,966 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:58,966 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 
2024-12-09T14:26:58,966 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741866_1049 2024-12-09T14:26:58,967 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:58,967 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:26:58,967 WARN [IPC Server handler 1 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:26:58,968 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:26:58,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741867_1050 (size=17994) 2024-12-09T14:26:59,085 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cd7c00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741842_1025 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
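The DataStreamer warnings above show the HDFS write pipeline cycling through freshly allocated blocks, abandoning each one as soon as the first datanode in the pipeline refuses the connection, until the NameNode runs out of placement candidates. How a DFS client reacts to datanode failures during a write is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings. The sketch below is an illustration only, not code from this test: the NameNode port 43639 and the replication factor of 2 are copied from the log, while the file path and the chosen policy values are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelineFailureDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:43639");   // NameNode port seen in the log
            conf.setInt("dfs.replication", 2);                    // the log expects 2 replicas
            // Governs whether a failed datanode in an existing pipeline is replaced
            // (relevant to the later "All datanodes ... are bad. Aborting" messages).
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // If no replacement datanode can be found, keep writing to the survivors.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

            try (FileSystem fs = FileSystem.get(conf);
                 FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"), (short) 2)) {
                out.writeBytes("pipeline recovery demo\n");
                out.hsync();   // push the bytes through whatever pipeline is still alive
            }
        }
    }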
2024-12-09T14:26:59,090 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741847_1030 to 127.0.0.1:39239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:59,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41507 {}] regionserver.HRegion(8855): Flush requested on 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:26:59,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2cb998fa9bbd6a409dda91836dbca4ce 1/1 column families, dataSize=8.40 KB heapSize=9.25 KB 2024-12-09T14:26:59,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/988c074f9e4746d6893dd2c75977df65 is 1079, key is tmprow/info:/1733754419125/Put/seqid=0 2024-12-09T14:26:59,134 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:59,134 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52398 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741868_1051 to mirror 127.0.0.1:40341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:59,134 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:59,134 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741868_1051 2024-12-09T14:26:59,134 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52398 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:26:59,134 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52398 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52398 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:59,135 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:26:59,136 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,136 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 2024-12-09T14:26:59,136 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741869_1052 2024-12-09T14:26:59,136 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:59,137 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,138 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:26:59,138 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741870_1053 2024-12-09T14:26:59,138 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:59,139 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,139 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 
2024-12-09T14:26:59,139 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741871_1054 2024-12-09T14:26:59,139 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:59,140 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:26:59,140 WARN [IPC Server handler 1 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:26:59,140 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:26:59,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741872_1055 (size=6027) 2024-12-09T14:26:59,379 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/451608a3efa44d3f996b086a0ed4c1a2 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 2024-12-09T14:26:59,386 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2cb998fa9bbd6a409dda91836dbca4ce/info of 2cb998fa9bbd6a409dda91836dbca4ce into 451608a3efa44d3f996b086a0ed4c1a2(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
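The BlockPlacementPolicyDefault warnings above end with a hint to enable DEBUG logging on two NameNode-side classes so the placement policy explains why each remaining datanode was rejected. With the Log4j 2 runtime this test run already uses, that can be done in the log4j2 properties file or programmatically; the snippet below shows the programmatic form purely as an illustration, with the two logger names quoted verbatim from the warning.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnablePlacementDebug {
        public static void main(String[] args) {
            // Raise the two loggers named in the warning so block placement decisions are explained.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }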
2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:26:59,386 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce., storeName=2cb998fa9bbd6a409dda91836dbca4ce/info, priority=13, startTime=1733754418942; duration=0sec 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 because midkey is the same as first or last row 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 because midkey is the same as first or last row 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 because midkey is the same as first or last row 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:26:59,386 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2cb998fa9bbd6a409dda91836dbca4ce:info 2024-12-09T14:26:59,439 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,439 WARN [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]] 2024-12-09T14:26:59,439 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C41507%2C1733754397420:(num 1733754417420) roll requested 2024-12-09T14:26:59,439 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.1733754419439 2024-12-09T14:26:59,442 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,442 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 
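The FSHLog lines above show the WAL writer noticing that its output pipeline has shrunk to a single replica ("Found 1 replicas but expecting no less than 2 replicas") and asking the log roller for a fresh WAL file on a new block. The replica threshold and the number of consecutive low-replication rolls HBase will attempt are configurable; the keys below are the ones FSHLog reads in recent HBase versions, but treat the exact names and values as something to verify against the version in use. This is a hedged sketch, not configuration taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Roll the WAL once the pipeline drops below this many live replicas.
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Stop forcing new rolls after this many consecutive low-replication rolls.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            System.out.println(conf.getInt("hbase.regionserver.hlog.tolerable.lowreplication", -1));
        }
    }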
2024-12-09T14:26:59,442 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741873_1056 2024-12-09T14:26:59,443 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:26:59,445 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52418 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741874_1057 to mirror 127.0.0.1:32959 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:59,445 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32959 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,445 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52418 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-09T14:26:59,445 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:26:59,445 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741874_1057 2024-12-09T14:26:59,445 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52418 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52418 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:59,446 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:26:59,448 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:26:59,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52434 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741875_1058 to mirror 127.0.0.1:40341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:59,448 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:26:59,448 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741875_1058 2024-12-09T14:26:59,448 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52434 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T14:26:59,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52434 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52434 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:26:59,449 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:26:59,449 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,450 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 
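Every failing address in this stretch of the log (127.0.0.1:40341, 38909, 39239, 32959) answers with "Connection refused", which is how a stopped MiniDFSCluster datanode appears to later pipeline setups, while 127.0.0.1:33181 keeps serving; the test name (testLogRollOnDatanodeDeath) suggests datanodes are being killed deliberately. The sketch below shows the general MiniDFSCluster pattern for that kind of fault injection. It is a guess at the shape of such a test, not the actual test code; the datanode count is arbitrary, and MiniDFSCluster ships in the hadoop-hdfs test artifact.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .numDataNodes(3)          // count chosen arbitrarily for the sketch
                .build();
            cluster.waitActive();
            // Stopping a datanode closes its ports, so later block writes that try to reach it
            // fail with "Connection refused" just like the DataStreamer/DataXceiver errors above.
            cluster.stopDataNode(0);
            cluster.stopDataNode(0);      // the node list shrinks, so index 0 now names the next node
            // ... drive WAL writes against the surviving nodes here ...
            cluster.shutdown();
        }
    }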
2024-12-09T14:26:59,450 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741876_1059 2024-12-09T14:26:59,450 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:26:59,451 WARN [IPC Server handler 0 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:26:59,451 WARN [IPC Server handler 0 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:26:59,451 WARN [IPC Server handler 0 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:26:59,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:59,453 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:59,453 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:59,453 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:59,453 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:26:59,454 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754417420 with entries=14, filesize=12.92 KB; new WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754419439 2024-12-09T14:26:59,454 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34253:34253)] 2024-12-09T14:26:59,454 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 is not closed yet, will try archiving it next time 2024-12-09T14:26:59,454 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754417420 is not closed yet, will try archiving it next time 2024-12-09T14:26:59,455 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754413401 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs/f4e784dc7cb5%2C41507%2C1733754397420.1733754413401 2024-12-09T14:26:59,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741857_1040 (size=13234) 2024-12-09T14:26:59,513 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:26:59,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.40 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/988c074f9e4746d6893dd2c75977df65 2024-12-09T14:26:59,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/988c074f9e4746d6893dd2c75977df65 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/988c074f9e4746d6893dd2c75977df65 2024-12-09T14:26:59,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/988c074f9e4746d6893dd2c75977df65, entries=1, sequenceid=45, filesize=5.9 K 2024-12-09T14:26:59,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.40 KB/8600, heapSize ~9.23 KB/9456, currentSize=2.10 KB/2150 for 2cb998fa9bbd6a409dda91836dbca4ce in 430ms, sequenceid=45, compaction requested=false 2024-12-09T14:26:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:26:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-09T14:26:59,556 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:26:59,557 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 because midkey is the same as first or last row 2024-12-09T14:26:59,856 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 is not closed yet, will try archiving it next time 2024-12-09T14:27:00,529 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:00,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41507 {}] regionserver.HRegion(8855): Flush requested on 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:27:00,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2cb998fa9bbd6a409dda91836dbca4ce 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T14:27:00,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/8aec1f230c8c4d3a896b63e728e793d9 is 1079, key is tmprow/info:/1733754420547/Put/seqid=0 2024-12-09T14:27:00,554 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
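In between the pipeline errors, the MemStoreFlusher lines show the normal write path still making progress: a flush is requested on region 2cb998fa9bbd6a409dda91836dbca4ce, the ~8.40 KB snapshot is written to a .tmp HFile and committed under info/ at sequenceid=45. The usual size trigger for an automatic flush is hbase.hregion.memstore.flush.size (evidently set far below its default here, or the flush was requested for another reason such as WAL pressure), and a flush can also be forced through the Admin API. The snippet below is a generic illustration that assumes a reachable cluster; it is not part of this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore size that triggers an automatic flush (default 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Force the same kind of flush the MemStoreFlusher performs in the log.
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }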
2024-12-09T14:27:00,554 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:27:00,555 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741878_1061 2024-12-09T14:27:00,555 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:27:00,557 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:00,557 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52452 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741879_1062 to mirror 127.0.0.1:39239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:00,557 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:27:00,557 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741879_1062 2024-12-09T14:27:00,558 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52452 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:27:00,558 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52452 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52452 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:00,558 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:27:00,559 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:27:00,559 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:27:00,559 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741880_1063 2024-12-09T14:27:00,560 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:00,561 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:00,561 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 
2024-12-09T14:27:00,561 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741881_1064 2024-12-09T14:27:00,561 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:27:00,562 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:27:00,562 WARN [IPC Server handler 1 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:27:00,562 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:27:00,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741882_1065 (size=6027) 2024-12-09T14:27:00,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/8aec1f230c8c4d3a896b63e728e793d9 2024-12-09T14:27:00,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/8aec1f230c8c4d3a896b63e728e793d9 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/8aec1f230c8c4d3a896b63e728e793d9 2024-12-09T14:27:00,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/8aec1f230c8c4d3a896b63e728e793d9, entries=1, sequenceid=56, filesize=5.9 K 2024-12-09T14:27:00,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 2cb998fa9bbd6a409dda91836dbca4ce in 431ms, sequenceid=56, compaction requested=true 2024-12-09T14:27:00,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:27:00,980 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-09T14:27:00,980 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:27:00,980 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 because midkey is the same as first or last row 2024-12-09T14:27:00,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2cb998fa9bbd6a409dda91836dbca4ce:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:27:00,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:27:00,980 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:27:00,981 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:27:00,981 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HStore(1541): 2cb998fa9bbd6a409dda91836dbca4ce/info is initiating minor compaction (all files) 2024-12-09T14:27:00,981 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2cb998fa9bbd6a409dda91836dbca4ce/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 
2024-12-09T14:27:00,981 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/988c074f9e4746d6893dd2c75977df65, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/8aec1f230c8c4d3a896b63e728e793d9] into tmpdir=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp, totalSize=29.3 K 2024-12-09T14:27:00,982 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 451608a3efa44d3f996b086a0ed4c1a2, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733754411409 2024-12-09T14:27:00,982 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 988c074f9e4746d6893dd2c75977df65, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733754419125 2024-12-09T14:27:00,983 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8aec1f230c8c4d3a896b63e728e793d9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733754420547 2024-12-09T14:27:00,997 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2cb998fa9bbd6a409dda91836dbca4ce#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:27:00,998 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/ada3b81eeb39429681bff606546ec9a3 is 1080, key is row0002/info:/1733754411409/Put/seqid=0 2024-12-09T14:27:01,000 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
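The ExploringCompactionPolicy line above reports that all 3 store files (17.6 K + 5.9 K + 5.9 K, 30048 bytes in total) were selected after considering a single permutation. The core "in ratio" idea, greatly simplified here and ignoring the policy's min/max-file, off-peak and stuck-store rules, is that a run of files only makes a good minor compaction when no single file dwarfs the sum of the others; a rough sketch:

    import java.util.List;

    public class RatioSelectionSketch {
        /** Returns true if every file fits within ratio * (sum of the other files) --
         *  a simplified version of the "in ratio" check the log line refers to. */
        static boolean inRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            System.out.println(inRatio(List.of(10_000L, 9_000L, 8_000L), 1.2)); // true: similar-sized files
            System.out.println(inRatio(List.of(50_000L, 5_000L, 5_000L), 1.2)); // false: one file dwarfs the rest
        }
    }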
2024-12-09T14:27:01,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52458 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741883_1066 to mirror 127.0.0.1:40341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:01,000 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:27:01,000 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741883_1066 2024-12-09T14:27:01,000 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52458 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:27:01,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52458 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52458 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:01,001 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:01,003 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32959 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:01,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52470 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741884_1067 to mirror 127.0.0.1:32959 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
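The DataXceiver errors above show the first datanode in the pipeline (127.0.0.1:33181) failing to reach its downstream mirror, which is then reported back to the client via firstBadLink and treated as a bad node. On very small clusters like this two-replica mini cluster, the client-side pipeline-replacement behaviour is commonly tuned with the dfs.client.block.write.replace-datanode-on-failure.* settings; the values below are illustrative only, not the settings this test actually uses:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailureConfSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Whether the client tries to replace a failed datanode in an existing pipeline at all.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            // DEFAULT / ALWAYS / NEVER -- on tiny clusters NEVER avoids futile replacement attempts.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
            // If replacement fails anyway, keep writing to the surviving nodes instead of aborting.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
        }
    }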
2024-12-09T14:27:01,003 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:27:01,003 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741884_1067 2024-12-09T14:27:01,003 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52470 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:27:01,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52470 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52470 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:01,004 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:27:01,005 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:27:01,005 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]) is bad. 2024-12-09T14:27:01,005 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741885_1068 2024-12-09T14:27:01,005 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39239,DS-948aca14-e576-4534-986d-672d3db444f3,DISK] 2024-12-09T14:27:01,007 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38909 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:01,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52482 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741886_1069 to mirror 127.0.0.1:38909 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:01,008 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]) is bad. 2024-12-09T14:27:01,008 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52482 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:27:01,008 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741886_1069 2024-12-09T14:27:01,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:52482 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52482 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:01,008 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK] 2024-12-09T14:27:01,009 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T14:27:01,009 WARN [IPC Server handler 1 on default port 43639 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T14:27:01,009 WARN [IPC Server handler 1 on default port 43639 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T14:27:01,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741887_1070 (size=18097) 2024-12-09T14:27:01,086 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cd7c00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741867_1050 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
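The BlockPlacementPolicyDefault warnings above simply mean the file is being written with replication 2 while only one datanode with DISK storage is still reachable, so the namenode can place one replica and no more. A hedged way to reproduce the situation with Hadoop's MiniDFSCluster (from the hadoop-hdfs test artifact; API names as I recall them) is sketched below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ReplicaShortfallSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.setInt("dfs.replication", 2);                        // expect 2 replicas per block
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
            try {
                cluster.waitActive();
                FileSystem fs = cluster.getFileSystem();
                cluster.stopDataNode(0);                              // kill one of the two datanodes
                // With a single live datanode the namenode can only place 1 of the 2 requested
                // replicas, which is what produces the "Failed to place enough replicas" warnings.
                try (FSDataOutputStream out = fs.create(new Path("/under-replicated"))) {
                    out.writeBytes("row0002");
                }
            } finally {
                cluster.shutdown();
            }
        }
    }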
2024-12-09T14:27:01,086 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741862_1045 to 127.0.0.1:32959 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:01,420 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/ada3b81eeb39429681bff606546ec9a3 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/ada3b81eeb39429681bff606546ec9a3 2024-12-09T14:27:01,427 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2cb998fa9bbd6a409dda91836dbca4ce/info of 2cb998fa9bbd6a409dda91836dbca4ce into ada3b81eeb39429681bff606546ec9a3(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:27:01,427 INFO [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce., storeName=2cb998fa9bbd6a409dda91836dbca4ce/info, priority=13, startTime=1733754420980; duration=0sec 2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/ada3b81eeb39429681bff606546ec9a3 because midkey is the same as first or last row 2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/ada3b81eeb39429681bff606546ec9a3 because midkey is the same as first or last row 2024-12-09T14:27:01,427 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-09T14:27:01,428 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:27:01,428 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/ada3b81eeb39429681bff606546ec9a3 because midkey is the same as first or last row 2024-12-09T14:27:01,428 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:27:01,428 DEBUG [RS:0;f4e784dc7cb5:41507-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2cb998fa9bbd6a409dda91836dbca4ce:info 2024-12-09T14:27:01,455 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:01,455 WARN [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-09T14:27:01,513 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:01,572 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:01,576 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:01,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:01,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:01,577 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:27:01,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2656efd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:01,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6869cf12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:01,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c1aa502{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/java.io.tmpdir/jetty-localhost-34781-hadoop-hdfs-3_4_1-tests_jar-_-any-16644327440460744609/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:01,708 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@6dc86a20{HTTP/1.1, (http/1.1)}{localhost:34781} 2024-12-09T14:27:01,708 INFO [Time-limited test {}] server.Server(415): Started @129566ms 2024-12-09T14:27:01,709 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:27:01,828 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:27:01,838 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9fc09ddd6d77fb31 with lease ID 0xcc254b5549ab9877: from storage DS-948aca14-e576-4534-986d-672d3db444f3 node DatanodeRegistration(127.0.0.1:46841, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46795, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:01,839 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9fc09ddd6d77fb31 with lease ID 0xcc254b5549ab9877: from storage DS-263eb177-1825-412d-b92f-942971985085 node DatanodeRegistration(127.0.0.1:46841, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46795, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:02,086 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cd7c00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741857_1040 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:02,086 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741872_1055 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:02,529 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:03,455 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:03,514 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
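The recurring FSHLog "All datanodes ... are bad. Aborting..." messages, together with the earlier "Too many consecutive RollWriter requests" warning, come from the WAL roller repeatedly trying to re-open the write-ahead log while its pipeline has fewer live replicas than it tolerates. Assuming the FSHLog property names below are right (they are quoted from memory, so treat the keys and defaults as assumptions), the thresholds involved look roughly like this:

    import org.apache.hadoop.conf.Configuration;

    public class WalLowReplicationSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Minimum pipeline replication the WAL accepts before requesting a roll
            // (assumed key name; normally derived from the filesystem's replication when unset).
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // How many consecutive low-replication rolls are attempted before the roller
            // emits the "Too many consecutive RollWriter requests" warning and backs off.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            System.out.println(conf.getInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5));
        }
    }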
2024-12-09T14:27:04,086 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cd7c00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741882_1065 to 127.0.0.1:32959 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:04,086 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33181, datanodeUuid=c3f00b85-7422-48b8-a33d-bdb3efe0585e, infoPort=34253, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741887_1070 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:04,530 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:27:05,455 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:05,514 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:06,530 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:07,338 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T14:27:07,456 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
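The FsDatasetAsyncDiskServiceFixer DEBUG line above is the test utility reaching into a Hadoop-internal class by reflection and shrugging off the fact that the threadGroup field no longer exists on newer Hadoop versions (HBASE-27595). A generic sketch of that guarded-reflection pattern, with a deliberately hypothetical target object:

    import java.lang.reflect.Field;

    public class GuardedReflectionSketch {
        /** Reads a private field if it exists; returns null instead of failing when the
         *  field was removed in a newer version of the target library. */
        static Object readFieldIfPresent(Object target, String fieldName) {
            try {
                Field f = target.getClass().getDeclaredField(fieldName);
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException | IllegalAccessException e) {
                // Mirrors the log above: report at debug level and carry on rather than failing.
                System.out.println("Field lookup failed: " + e);
                return null;
            }
        }

        public static void main(String[] args) {
            System.out.println(readFieldIfPresent(new Object(), "threadGroup")); // prints the notice, then null
        }
    }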
2024-12-09T14:27:07,515 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:07,659 ERROR [FSHLog-0-hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData-prefix:f4e784dc7cb5,34437,1733754397363 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:07,659 WARN [FSHLog-0-hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData-prefix:f4e784dc7cb5,34437,1733754397363 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:07,659 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C34437%2C1733754397363:(num 1733754397547) roll requested 2024-12-09T14:27:07,659 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C34437%2C1733754397363.1733754427659 2024-12-09T14:27:07,663 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:07,663 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:52506 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741888_1071 to mirror 127.0.0.1:40341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:07,663 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:27:07,663 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741888_1071 2024-12-09T14:27:07,663 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:52506 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-09T14:27:07,664 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:52506 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52506 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:07,664 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:07,666 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32959 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:07,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:52522 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741889_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741889_1072 to mirror 127.0.0.1:32959 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:07,666 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK]) is bad. 2024-12-09T14:27:07,666 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741889_1072 2024-12-09T14:27:07,666 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:52522 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741889_1072] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T14:27:07,666 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:52522 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741889_1072] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52522 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:07,667 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32959,DS-7222474e-e52d-498c-8d10-fe75c711b595,DISK] 2024-12-09T14:27:07,672 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:07,672 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:07,672 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:07,672 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:07,672 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:07,672 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754427659 2024-12-09T14:27:07,673 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:07,673 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
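Once every datanode left in the old writer's pipeline is bad, both the trailer write and the close fail (logged above as non-fatal), and the WAL code falls back to NameNode lease recovery on the orphaned file, retrying until the lease is released; that is what the RecoverLeaseFSUtils "attempt=0 ... after 0ms" and "attempt=1 ... after 4002ms" records that follow are reporting. A minimal sketch of the same recoverLease polling pattern against the plain HDFS client API is given below; the file path comes from this log, and the 4-second retry interval is an assumption inferred from the timestamps.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Sketch only: poll DistributedFileSystem#recoverLease until the NameNode
 * reports the lease as released, the same basic loop RecoverLeaseFSUtils drives.
 * The WAL path is taken from this log; the retry interval is an assumption.
 */
public class LeaseRecoveryLoop {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:43639"), conf);
    Path wal = new Path("/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/"
        + "MasterData/WALs/f4e784dc7cb5,34437,1733754397363/"
        + "f4e784dc7cb5%2C34437%2C1733754397363.1733754397547");

    // recoverLease returns true once the file is closed and safe to reopen.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered) {
      Thread.sleep(4000L); // log shows roughly 4s between attempt=0 and attempt=1
      recovered = dfs.recoverLease(wal);
    }
    System.out.println("Lease released for " + wal);
  }
}
```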
2024-12-09T14:27:07,673 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 2024-12-09T14:27:07,673 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46795:46795),(127.0.0.1/127.0.0.1:34253:34253)] 2024-12-09T14:27:07,673 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 is not closed yet, will try archiving it next time 2024-12-09T14:27:07,673 WARN [IPC Server handler 0 on default port 43639 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-12-09T14:27:07,673 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 after 0ms 2024-12-09T14:27:08,530 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:09,456 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:10,531 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:11,457 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:11,675 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 after 4002ms 2024-12-09T14:27:11,851 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@ee6f638 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:38909,null,null]) java.net.ConnectException: Call From f4e784dc7cb5/172.17.0.3 to localhost:39227 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T14:27:11,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741833_1019 (size=455) 2024-12-09T14:27:12,424 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs/f4e784dc7cb5%2C41507%2C1733754397420.1733754397974 2024-12-09T14:27:12,425 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754417420 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs/f4e784dc7cb5%2C41507%2C1733754397420.1733754417420 2024-12-09T14:27:12,531 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:12,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:27:12,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741836_1012 (size=76) 2024-12-09T14:27:13,457 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:13,834 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fa27928[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46841, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46795, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741826_1002 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:13,834 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@57a7728d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46841, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46795, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741828_1004 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:14,531 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,350 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.1733754435350 2024-12-09T14:27:15,354 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:38674 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741891_1075 to mirror 127.0.0.1:40341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:15,354 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:27:15,354 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741891_1075 2024-12-09T14:27:15,354 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:38674 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T14:27:15,354 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-219483739_22 at /127.0.0.1:38674 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38674 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:15,354 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:15,358 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,358 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,358 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,359 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,359 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,359 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754419439 with entries=14, filesize=12.95 KB; new WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754435350 2024-12-09T14:27:15,360 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34253:34253),(127.0.0.1/127.0.0.1:46795:46795)] 2024-12-09T14:27:15,360 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.1733754419439 is not closed yet, will try archiving it next time 2024-12-09T14:27:15,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741877_1060 (size=13268) 2024-12-09T14:27:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41507 {}] regionserver.HRegion(8855): Flush requested on 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:27:15,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2cb998fa9bbd6a409dda91836dbca4ce 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T14:27:15,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/2df97e4604f8435daf9ea92473d29d0a is 1080, key is row0013/info:/1733754435361/Put/seqid=0 2024-12-09T14:27:15,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741893_1077 (size=9267) 2024-12-09T14:27:15,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741893_1077 (size=9267) 2024-12-09T14:27:15,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=67 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/2df97e4604f8435daf9ea92473d29d0a 2024-12-09T14:27:15,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/2df97e4604f8435daf9ea92473d29d0a as 
hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/2df97e4604f8435daf9ea92473d29d0a 2024-12-09T14:27:15,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/2df97e4604f8435daf9ea92473d29d0a, entries=4, sequenceid=67, filesize=9.0 K 2024-12-09T14:27:15,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 2cb998fa9bbd6a409dda91836dbca4ce in 27ms, sequenceid=67, compaction requested=false 2024-12-09T14:27:15,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2cb998fa9bbd6a409dda91836dbca4ce: 2024-12-09T14:27:15,393 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-12-09T14:27:15,393 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:27:15,393 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/ada3b81eeb39429681bff606546ec9a3 because midkey is the same as first or last row 2024-12-09T14:27:15,457 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,458 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-09T14:27:15,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T14:27:15,583 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
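The flush above follows the usual two-step HFile commit: the memstore is written to a temporary file under .tmp/info/ and then moved into the store's info/ directory before the flush is reported finished (entries=4, sequenceid=67, filesize=9.0 K). The sketch below only illustrates how a client can request such a flush for this test table; the ZooKeeper quorum settings are placeholders taken from elsewhere in this log, not a verified client setup.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Sketch: ask the region server to flush the test table's memstore, which is
 * what produces the .tmp -> info/ HFile commit seen above. The table name is
 * from this log; connection settings are illustrative.
 */
public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // assumed client setup
    conf.set("hbase.zookeeper.property.clientPort", "54193"); // port seen in this log
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush writes the current memstore contents out as a new HFile.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}
```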
2024-12-09T14:27:15,583 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:27:15,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:15,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:15,583 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
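The call stack above is the JUnit tearDown path: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection and then brings down the HBase and HDFS mini clusters. A bare-bones version of that lifecycle is sketched below; the class name and shutdownMiniCluster() call match the stack trace, while the startMiniCluster() call and the empty test body are assumptions added for illustration.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * Sketch of the mini-cluster lifecycle driven by the tearDown stack above.
 * HBaseTestingUtil and shutdownMiniCluster() appear in the trace; the
 * startMiniCluster() call and the empty test are assumed for illustration.
 */
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // starts ZooKeeper, HDFS and HBase in-process
  }

  @Test
  public void doesNothingYet() {
    // A real test (e.g. testLogRollOnDatanodeDeath) would write data and stop datanodes here.
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster(); // the call seen in the stack trace above
  }
}
```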
2024-12-09T14:27:15,583 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T14:27:15,583 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=341552348, stopped=false 2024-12-09T14:27:15,583 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f4e784dc7cb5,34437,1733754397363 2024-12-09T14:27:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:27:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:27:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:27:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:15,585 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:27:15,586 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
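Cluster shutdown is propagated through ZooKeeper: the master deletes /hbase/running, each ZKWatcher above receives the NodeDeleted event, and the region servers begin stopping. The sketch below shows the generic watch-for-deletion pattern using the raw ZooKeeper client rather than HBase's ZKWatcher wrapper; the quorum address and znode path are taken from these records, the timeouts are arbitrary.

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/**
 * Sketch: watch /hbase/running and react when it disappears, the same signal
 * the ZKWatcher records above are reporting. Uses the plain ZooKeeper client,
 * not HBase's wrapper; session timeout is arbitrary.
 */
public class RunningNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54193", 30_000, event -> { });

    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        deleted.countDown(); // shutdown has been requested
      }
    };

    // exists() registers the watch whether or not the znode is currently present.
    zk.exists("/hbase/running", watcher);
    deleted.await();
    System.out.println("/hbase/running deleted - cluster shutdown requested");
    zk.close();
  }
}
```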
2024-12-09T14:27:15,586 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:27:15,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:15,586 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,41507,1733754397420' ***** 2024-12-09T14:27:15,586 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:27:15,586 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,41215,1733754398471' ***** 2024-12-09T14:27:15,586 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:27:15,586 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:27:15,586 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:27:15,586 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:27:15,586 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:27:15,586 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:27:15,587 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:27:15,587 INFO [RS:1;f4e784dc7cb5:41215 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:27:15,587 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:27:15,587 INFO [RS:1;f4e784dc7cb5:41215 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:27:15,587 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,41215,1733754398471 2024-12-09T14:27:15,587 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:27:15,587 INFO [RS:1;f4e784dc7cb5:41215 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;f4e784dc7cb5:41215. 
2024-12-09T14:27:15,587 DEBUG [RS:1;f4e784dc7cb5:41215 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:27:15,587 DEBUG [RS:1;f4e784dc7cb5:41215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:15,587 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,41215,1733754398471; all regions closed. 2024-12-09T14:27:15,587 INFO [RS:0;f4e784dc7cb5:41507 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:27:15,587 INFO [RS:0;f4e784dc7cb5:41507 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:27:15,588 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(3091): Received CLOSE for 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:27:15,588 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,588 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,588 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,588 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,588 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,41507,1733754397420 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f4e784dc7cb5:41507. 
2024-12-09T14:27:15,592 DEBUG [RS:0;f4e784dc7cb5:41507 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:27:15,592 DEBUG [RS:0;f4e784dc7cb5:41507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:15,592 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2cb998fa9bbd6a409dda91836dbca4ce, disabling compactions & flushes 2024-12-09T14:27:15,592 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T14:27:15,592 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:27:15,592 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
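When a region server receives CLOSE for a region, it disables compactions and flushes, waits for the close lock, and (as the next records show) flushes whatever is still in the memstore before the region is marked closed. The sketch below is a rough client-side analogue only: disabling the table drives its regions through the same flush-then-close path. The table name comes from this log; the connection settings are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Sketch: disabling the table closes its regions through the same
 * close-lock / flush path logged around here. Table name from the log;
 * connection details are placeholders.
 */
public class CloseRegionsByDisable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // assumed client setup
    conf.set("hbase.zookeeper.property.clientPort", "54193"); // port seen in this log
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // each region flushes pending edits, then closes
      }
    }
  }
}
```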
2024-12-09T14:27:15,592 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. after waiting 0 ms 2024-12-09T14:27:15,592 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T14:27:15,592 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,592 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 2024-12-09T14:27:15,592 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2cb998fa9bbd6a409dda91836dbca4ce 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-09T14:27:15,592 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T14:27:15,593 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 2cb998fa9bbd6a409dda91836dbca4ce=TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.} 2024-12-09T14:27:15,593 DEBUG [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2cb998fa9bbd6a409dda91836dbca4ce 2024-12-09T14:27:15,593 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:27:15,593 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:27:15,593 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:27:15,593 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:27:15,593 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:27:15,593 WARN [IPC Server handler 3 on default port 43639 
{}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 has not been closed. Lease recovery is in progress. RecoveryId = 1078 for block blk_1073741837_1013 2024-12-09T14:27:15,593 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-09T14:27:15,593 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 after 1ms 2024-12-09T14:27:15,593 ERROR [FSHLog-0-hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd-prefix:f4e784dc7cb5,41507,1733754397420.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,593 WARN [FSHLog-0-hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd-prefix:f4e784dc7cb5,41507,1733754397420.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,594 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C41507%2C1733754397420.meta:.meta(num 1733754398281) roll requested 2024-12-09T14:27:15,594 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754435594.meta 2024-12-09T14:27:15,597 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,597 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:46841,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:27:15,597 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741894_1079 2024-12-09T14:27:15,597 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/3e3484dfa6064617a34517f593708899 is 1080, key is row0016/info:/1733754435367/Put/seqid=0 2024-12-09T14:27:15,597 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:15,600 WARN [Thread-1038 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40341 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
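The WARN entries above show HDFS pipeline recovery in miniature: the client cannot reach 127.0.0.1:40341, abandons the block attempt, excludes the unreachable datanode, and retries with the remaining nodes until too few are left. A toy Java sketch of that retry-with-exclusion loop follows; the ReplicaWriter interface, NodeFailedException, and all names are invented for illustration — this is not the real HDFS DataStreamer.

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

/**
 * Toy model of the recovery pattern in the DataStreamer WARN lines above:
 * a node in the write pipeline refuses the connection, the block attempt is
 * abandoned, the bad node is added to an exclude set, and the write is
 * retried with the remaining nodes. All names here are hypothetical.
 */
public class PipelineRecoverySketch {

    /** Hypothetical per-node failure that identifies which node broke the pipeline. */
    static class NodeFailedException extends IOException {
        final String node;
        NodeFailedException(String node, String msg) { super(msg); this.node = node; }
    }

    /** Hypothetical replica sink standing in for one datanode. */
    interface ReplicaWriter {
        void write(String node, byte[] data) throws NodeFailedException;
    }

    static List<String> writeWithRecovery(List<String> pipeline, byte[] data,
                                          ReplicaWriter writer, int minReplicas)
            throws IOException {
        Set<String> excluded = new LinkedHashSet<>();
        while (true) {
            List<String> candidates = new ArrayList<>(pipeline);
            candidates.removeAll(excluded);
            if (candidates.size() < minReplicas) {
                // Terminal case, analogous to "All datanodes [...] are bad. Aborting..."
                throw new IOException("All datanodes " + pipeline + " are bad. Aborting...");
            }
            try {
                for (String node : candidates) {
                    writer.write(node, data);   // may throw for an unreachable node
                }
                return candidates;              // the pipeline that finally succeeded
            } catch (NodeFailedException e) {
                // Abandon this attempt and exclude the node reported as bad,
                // like "Abandoning blk_..." followed by "Excluding datanode ...".
                excluded.add(e.node);
            }
        }
    }
}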
2024-12-09T14:27:15,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:38714 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741895_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8]'}, localName='127.0.0.1:33181', datanodeUuid='c3f00b85-7422-48b8-a33d-bdb3efe0585e', xmitsInProgress=0}:Exception transferring block BP-1955786853-172.17.0.3-1733754396514:blk_1073741895_1080 to mirror 127.0.0.1:40341 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:15,600 WARN [Thread-1038 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:27:15,600 WARN [Thread-1038 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741895_1080 2024-12-09T14:27:15,600 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:38714 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741895_1080] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T14:27:15,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_373293175_22 at /127.0.0.1:38714 [Receiving block BP-1955786853-172.17.0.3-1733754396514:blk_1073741895_1080] {}] datanode.DataXceiver(331): 127.0.0.1:33181:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38714 dst: /127.0.0.1:33181 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:15,601 WARN [Thread-1038 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:15,605 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,605 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,605 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,605 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,606 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,606 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754435594.meta 2024-12-09T14:27:15,613 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,613 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38909,DS-7e267eb6-25af-47df-82ae-b4bebe68e807,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
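The Close-WAL-Writer entries above show the other half of the recovery: once the old WAL writer cannot be closed cleanly, the file's HDFS lease is recovered so the NameNode will eventually finalize it, with repeated attempts (attempt=0 after 1ms here, attempt=1 after roughly 4 seconds further down). A minimal sketch of such a loop against the public DistributedFileSystem.recoverLease and isFileClosed calls is below; the NameNode URI, WAL path, and 4-second backoff are assumptions, and the real RecoverLeaseFSUtils has more safeguards.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Minimal sketch of lease recovery on an abandoned WAL file, in the spirit
 * of the RecoverLeaseFSUtils lines above. The retry schedule and the paths
 * are illustrative assumptions.
 */
public class LeaseRecoverySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode address and WAL path, for illustration only.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43639"), conf);
        Path wal = new Path("/hbase/WALs/example-server/example-server.1733754398662");

        if (!(fs instanceof DistributedFileSystem)) {
            throw new IllegalStateException("Lease recovery only applies to HDFS");
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;

        long start = System.currentTimeMillis();
        for (int attempt = 0; ; attempt++) {
            // recoverLease returns true once the file is closed and its lease released.
            boolean recovered = dfs.recoverLease(wal) || dfs.isFileClosed(wal);
            long elapsed = System.currentTimeMillis() - start;
            if (recovered) {
                System.out.println("Recovered lease, attempt=" + attempt + " after " + elapsed + "ms");
                return;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt + " after " + elapsed + "ms");
            // Back off before asking the NameNode again (4s here, purely an assumption).
            Thread.sleep(4000L);
        }
    }
}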
2024-12-09T14:27:15,613 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta 2024-12-09T14:27:15,614 WARN [IPC Server handler 1 on default port 43639 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta has not been closed. Lease recovery is in progress. RecoveryId = 1083 for block blk_1073741834_1010 2024-12-09T14:27:15,614 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46795:46795),(127.0.0.1/127.0.0.1:34253:34253)] 2024-12-09T14:27:15,614 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta is not closed yet, will try archiving it next time 2024-12-09T14:27:15,614 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta after 1ms 2024-12-09T14:27:15,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741897_1082 (size=13583) 2024-12-09T14:27:15,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741897_1082 (size=13583) 2024-12-09T14:27:15,615 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/3e3484dfa6064617a34517f593708899 2024-12-09T14:27:15,622 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/.tmp/info/3e3484dfa6064617a34517f593708899 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3e3484dfa6064617a34517f593708899 2024-12-09T14:27:15,627 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3e3484dfa6064617a34517f593708899, entries=8, sequenceid=78, filesize=13.3 K 2024-12-09T14:27:15,629 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, 
currentSize=0 B/0 for 2cb998fa9bbd6a409dda91836dbca4ce in 37ms, sequenceid=78, compaction requested=true 2024-12-09T14:27:15,629 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3304f2e9ad214f2592304a75db05ac71, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/b7d9b186844c4b029e88543e5f6b9437, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/988c074f9e4746d6893dd2c75977df65, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/8aec1f230c8c4d3a896b63e728e793d9] to archive 2024-12-09T14:27:15,631 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T14:27:15,632 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3304f2e9ad214f2592304a75db05ac71 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/3304f2e9ad214f2592304a75db05ac71 2024-12-09T14:27:15,634 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/9211efda7b0f47ef94071250256af66b 2024-12-09T14:27:15,635 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 to 
hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/451608a3efa44d3f996b086a0ed4c1a2 2024-12-09T14:27:15,637 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/b7d9b186844c4b029e88543e5f6b9437 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/b7d9b186844c4b029e88543e5f6b9437 2024-12-09T14:27:15,637 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/info/a1ebe7da21f449d785bb6378ece3745a is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce./info:regioninfo/1733754398938/Put/seqid=0 2024-12-09T14:27:15,638 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/988c074f9e4746d6893dd2c75977df65 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/988c074f9e4746d6893dd2c75977df65 2024-12-09T14:27:15,639 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,639 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 
2024-12-09T14:27:15,639 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741898_1084 2024-12-09T14:27:15,640 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/8aec1f230c8c4d3a896b63e728e793d9 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/info/8aec1f230c8c4d3a896b63e728e793d9 2024-12-09T14:27:15,640 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:15,640 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f4e784dc7cb5:34437 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-09T14:27:15,640 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3304f2e9ad214f2592304a75db05ac71=10347, 9211efda7b0f47ef94071250256af66b=12506, 451608a3efa44d3f996b086a0ed4c1a2=17994, b7d9b186844c4b029e88543e5f6b9437=6027, 988c074f9e4746d6893dd2c75977df65=6027, 8aec1f230c8c4d3a896b63e728e793d9=6027] 2024-12-09T14:27:15,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741899_1085 (size=7089) 2024-12-09T14:27:15,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741899_1085 (size=7089) 2024-12-09T14:27:15,646 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2cb998fa9bbd6a409dda91836dbca4ce/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-12-09T14:27:15,646 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/info/a1ebe7da21f449d785bb6378ece3745a 2024-12-09T14:27:15,647 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 
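The flush sequence above follows a write-then-rename pattern: the new store file is written under .tmp, committed into the store directory with a rename, and the compacted-away files are moved to an archive directory instead of being deleted (the failed reportFileArchival RPC above is only the quota report, which the log says will be retried). Below is a local-filesystem analogue of that sequence using java.nio.file; all paths and names are made up, and HBase performs the equivalent steps on HDFS through HRegionFileSystem and HFileArchiver.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;

/**
 * Local-filesystem analogue of the flush/commit/archive sequence above.
 */
public class FlushCommitArchiveSketch {
    static void flushCommitArchive(Path storeDir, Path archiveDir,
                                   String newFileName, byte[] flushedData,
                                   List<Path> compactedAway) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Files.createDirectories(archiveDir);

        // 1. Flush: write the new file where readers cannot see it yet.
        Path tmpFile = tmpDir.resolve(newFileName);
        Files.write(tmpFile, flushedData);

        // 2. Commit: a single rename makes the file visible in the store.
        Files.move(tmpFile, storeDir.resolve(newFileName),
                StandardCopyOption.ATOMIC_MOVE);

        // 3. Archive: superseded (compacted-away) files are moved, not deleted,
        //    so they remain available under the archive directory.
        for (Path old : compactedAway) {
            Files.move(old, archiveDir.resolve(old.getFileName()),
                    StandardCopyOption.REPLACE_EXISTING);
        }
    }
}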
2024-12-09T14:27:15,647 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2cb998fa9bbd6a409dda91836dbca4ce: Waiting for close lock at 1733754435592Running coprocessor pre-close hooks at 1733754435592Disabling compacts and flushes for region at 1733754435592Disabling writes for close at 1733754435592Obtaining lock to block concurrent updates at 1733754435592Preparing flush snapshotting stores in 2cb998fa9bbd6a409dda91836dbca4ce at 1733754435592Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1733754435593 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. at 1733754435594 (+1 ms)Flushing 2cb998fa9bbd6a409dda91836dbca4ce/info: creating writer at 1733754435594Flushing 2cb998fa9bbd6a409dda91836dbca4ce/info: appending metadata at 1733754435597 (+3 ms)Flushing 2cb998fa9bbd6a409dda91836dbca4ce/info: closing flushed file at 1733754435597Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@280f4d7d: reopening flushed file at 1733754435621 (+24 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 2cb998fa9bbd6a409dda91836dbca4ce in 37ms, sequenceid=78, compaction requested=true at 1733754435629 (+8 ms)Writing region close event to WAL at 1733754435641 (+12 ms)Running coprocessor post-close hooks at 1733754435646 (+5 ms)Closed at 1733754435646 2024-12-09T14:27:15,647 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733754398562.2cb998fa9bbd6a409dda91836dbca4ce. 2024-12-09T14:27:15,668 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/ns/057ca64c973f49bc9466021f8b6e2497 is 43, key is default/ns:d/1733754398332/Put/seqid=0 2024-12-09T14:27:15,670 WARN [Thread-1057 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
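The "Region close journal" line above is a list of timestamped steps in which each step that lands later than the previous one is annotated with its delta, e.g. "(+24 ms)". A generic recorder that produces that kind of journal might look like the sketch below; it is not the HRegion implementation, and the recorded labels are only examples.

import java.util.ArrayList;
import java.util.List;

/**
 * Sketch of a close-journal recorder: steps are stamped as they happen and
 * rendered with "(+N ms)" deltas between consecutive steps.
 */
public class CloseJournalSketch {
    private final List<String> labels = new ArrayList<>();
    private final List<Long> times = new ArrayList<>();

    public void record(String label) {
        labels.add(label);
        times.add(System.currentTimeMillis());
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < labels.size(); i++) {
            if (i > 0) {
                sb.append(' ');
            }
            sb.append(labels.get(i)).append(" at ").append(times.get(i));
            long delta = i == 0 ? 0 : times.get(i) - times.get(i - 1);
            if (delta > 0) {
                sb.append(" (+").append(delta).append(" ms)");
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) throws InterruptedException {
        CloseJournalSketch journal = new CloseJournalSketch();
        journal.record("Waiting for close lock");
        journal.record("Disabling writes for close");
        Thread.sleep(20);
        journal.record("Writing region close event to WAL");
        journal.record("Closed");
        System.out.println(journal);
    }
}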
2024-12-09T14:27:15,671 WARN [Thread-1057 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:46841,DS-948aca14-e576-4534-986d-672d3db444f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 2024-12-09T14:27:15,671 WARN [Thread-1057 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741900_1086 2024-12-09T14:27:15,671 WARN [Thread-1057 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:15,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741901_1087 (size=5153) 2024-12-09T14:27:15,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741901_1087 (size=5153) 2024-12-09T14:27:15,676 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/ns/057ca64c973f49bc9466021f8b6e2497 2024-12-09T14:27:15,698 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/table/c89c0e675dc34940b5d46e0db3d69f38 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733754398948/Put/seqid=0 2024-12-09T14:27:15,700 WARN [Thread-1063 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:15,700 WARN [Thread-1063 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1955786853-172.17.0.3-1733754396514:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK], DatanodeInfoWithStorage[127.0.0.1:33181,DS-2bd456eb-2987-497c-8905-4673ca0fc0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK]) is bad. 
2024-12-09T14:27:15,701 WARN [Thread-1063 {}] hdfs.DataStreamer(1850): Abandoning BP-1955786853-172.17.0.3-1733754396514:blk_1073741902_1088 2024-12-09T14:27:15,701 WARN [Thread-1063 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40341,DS-9b9d6237-dc93-4c11-ad08-f66a13ef1a35,DISK] 2024-12-09T14:27:15,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741903_1089 (size=5424) 2024-12-09T14:27:15,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741903_1089 (size=5424) 2024-12-09T14:27:15,706 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/table/c89c0e675dc34940b5d46e0db3d69f38 2024-12-09T14:27:15,712 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/info/a1ebe7da21f449d785bb6378ece3745a as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/info/a1ebe7da21f449d785bb6378ece3745a 2024-12-09T14:27:15,718 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/info/a1ebe7da21f449d785bb6378ece3745a, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T14:27:15,719 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/ns/057ca64c973f49bc9466021f8b6e2497 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/ns/057ca64c973f49bc9466021f8b6e2497 2024-12-09T14:27:15,724 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/ns/057ca64c973f49bc9466021f8b6e2497, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T14:27:15,725 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/.tmp/table/c89c0e675dc34940b5d46e0db3d69f38 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/table/c89c0e675dc34940b5d46e0db3d69f38 2024-12-09T14:27:15,730 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/table/c89c0e675dc34940b5d46e0db3d69f38, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T14:27:15,731 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-12-09T14:27:15,737 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T14:27:15,738 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:27:15,738 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:27:15,738 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754435593Running coprocessor pre-close hooks at 1733754435593Disabling compacts and flushes for region at 1733754435593Disabling writes for close at 1733754435593Obtaining lock to block concurrent updates at 1733754435593Preparing flush snapshotting stores in 1588230740 at 1733754435593Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733754435593Flushing stores of hbase:meta,,1.1588230740 at 1733754435615 (+22 ms)Flushing 1588230740/info: creating writer at 1733754435615Flushing 1588230740/info: appending metadata at 1733754435637 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733754435637Flushing 1588230740/ns: creating writer at 1733754435652 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733754435668 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733754435668Flushing 1588230740/table: creating writer at 1733754435682 (+14 ms)Flushing 1588230740/table: appending metadata at 1733754435698 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733754435698Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c5efffc: reopening flushed file at 1733754435711 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58554676: reopening flushed file at 1733754435718 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24d2f8de: reopening flushed file at 1733754435724 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1733754435732 (+8 ms)Writing region close event to WAL at 1733754435733 (+1 ms)Running coprocessor post-close hooks at 1733754435738 (+5 ms)Closed at 1733754435738 2024-12-09T14:27:15,739 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T14:27:15,793 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,41507,1733754397420; all regions closed. 
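"Waiting on 2 regions to close" earlier, followed here by "all regions closed", reflects a simple drain loop: the server tracks its online regions in a concurrent map, close handlers remove entries as each region finishes, and the stopping thread polls until the map is empty. A sketch of that pattern, with invented names and polling interval, is below.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

/**
 * Sketch of the shutdown drain loop behind "Waiting on N regions to close"
 * and "all regions closed". Names and the 100 ms poll interval are illustrative.
 */
public class WaitForRegionsSketch {
    private final Map<String, String> onlineRegions = new ConcurrentHashMap<>();

    void regionOpened(String encodedName, String regionName) {
        onlineRegions.put(encodedName, regionName);
    }

    void regionClosed(String encodedName) {
        onlineRegions.remove(encodedName);           // called by close handlers
    }

    void waitUntilAllRegionsClosed() throws InterruptedException {
        while (!onlineRegions.isEmpty()) {
            System.out.println("Waiting on " + onlineRegions.size() + " regions to close");
            System.out.println("Online Regions=" + onlineRegions.keySet());
            TimeUnit.MILLISECONDS.sleep(100);
        }
        System.out.println("all regions closed");
    }
}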
2024-12-09T14:27:15,793 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,793 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,794 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,794 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,794 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:15,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741896_1081 (size=825) 2024-12-09T14:27:15,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741896_1081 (size=825) 2024-12-09T14:27:15,800 INFO [regionserver/f4e784dc7cb5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:27:15,851 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T14:27:15,852 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T14:27:16,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741877_1060 (size=13268) 2024-12-09T14:27:16,528 INFO [regionserver/f4e784dc7cb5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:27:16,551 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T14:27:16,551 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T14:27:16,834 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@57a7728d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46841, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46795, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741829_1005 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
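The DataTransfer warnings above and below come from background block replication that still targets the killed datanode: the socket connect is refused, the worker logs the failure, and the DataNode carries on rather than failing outright. A standalone sketch of that "probe, log, continue" behaviour follows; host, port, and timeout are illustrative.

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.Socket;

/**
 * Sketch of the failure mode behind the repeated
 * "Failed to transfer ... got java.net.ConnectException: Connection refused"
 * warnings: a transfer to a dead peer fails fast and is only logged.
 */
public class TransferProbeSketch {
    static boolean tryTransfer(String host, int port, byte[] block) {
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(host, port), 3_000); // 3s connect timeout
            socket.getOutputStream().write(block);                    // ship the block payload
            return true;
        } catch (ConnectException e) {
            // Nothing is listening on the peer -- record it and move on,
            // as the DataTransfer worker thread does above.
            System.out.println("Failed to transfer to " + host + ":" + port + " got " + e);
            return false;
        } catch (IOException e) {
            System.out.println("Transfer to " + host + ":" + port + " failed: " + e);
            return false;
        }
    }
}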
2024-12-09T14:27:16,834 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fa27928[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46841, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46795, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741827_1003 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:17,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T14:27:17,823 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:27:17,823 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T14:27:17,834 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@57a7728d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46841, datanodeUuid=e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9, infoPort=46795, infoSecurePort=0, ipcPort=42541, storageInfo=lv=-57;cid=testClusterID;nsid=1854195670;c=1733754396514):Failed to transfer BP-1955786853-172.17.0.3-1733754396514:blk_1073741833_1019 to 127.0.0.1:40341 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:17,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:27:18,350 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T14:27:18,350 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T14:27:18,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:27:18,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741835_1011 (size=393) 2024-12-09T14:27:19,594 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 after 4002ms 2024-12-09T14:27:19,615 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta after 4002ms 2024-12-09T14:27:20,592 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T14:27:20,595 DEBUG [RS:1;f4e784dc7cb5:41215 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C41215%2C1733754398471:(num 1733754398662) 2024-12-09T14:27:20,595 DEBUG [RS:1;f4e784dc7cb5:41215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T14:27:20,595 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
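The WAL-Shutdown-0 ERROR above is a bounded wait: shutdown gives the writer-close work a configurable number of seconds (5 by default, per hbase.wal.fshlog.wait.on.shutdown.seconds) and then proceeds with an error instead of hanging forever on the broken pipeline. A minimal Java sketch of such a bounded wait around an ExecutorService is below; apart from the property name and the 5-second default taken from the log message, everything is illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Sketch of a bounded shutdown wait: ask the close executor to finish,
 * wait a configurable number of seconds, then log and give up.
 */
public class BoundedWalShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        long waitSeconds = Long.getLong("hbase.wal.fshlog.wait.on.shutdown.seconds", 5L);

        ExecutorService closeExecutor = Executors.newSingleThreadExecutor();
        closeExecutor.submit(() -> {
            try {
                Thread.sleep(10_000); // simulate a writer close stuck on a bad pipeline
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        });

        closeExecutor.shutdown();
        if (!closeExecutor.awaitTermination(waitSeconds, TimeUnit.SECONDS)) {
            System.err.println("We have waited " + waitSeconds + " seconds but the close of "
                    + "async writer doesn't complete. Please check the status of the underlying "
                    + "filesystem or increase the wait time.");
            closeExecutor.shutdownNow(); // give up and interrupt the stuck close task
        }
    }
}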
2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:27:20,595 INFO [RS:1;f4e784dc7cb5:41215 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41215 2024-12-09T14:27:20,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,41215,1733754398471 2024-12-09T14:27:20,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:27:20,598 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:27:20,600 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,41215,1733754398471] 2024-12-09T14:27:20,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:20,601 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,41215,1733754398471 already deleted, retry=false 2024-12-09T14:27:20,602 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,41215,1733754398471 expired; onlineServers=1 2024-12-09T14:27:20,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:20,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:20,700 INFO [RS:1;f4e784dc7cb5:41215 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:27:20,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x1012b9432be0002, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:20,700 INFO [RS:1;f4e784dc7cb5:41215 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,41215,1733754398471; zookeeper connection closed. 2024-12-09T14:27:20,701 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 2024-12-09T14:27:20,794 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T14:27:20,798 DEBUG [RS:0;f4e784dc7cb5:41507 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs 2024-12-09T14:27:20,798 INFO [RS:0;f4e784dc7cb5:41507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C41507%2C1733754397420.meta:.meta(num 1733754435594) 2024-12-09T14:27:20,798 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:20,798 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:20,798 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:20,799 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:20,799 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741892_1076 (size=14682) 2024-12-09T14:27:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741892_1076 (size=14682) 2024-12-09T14:27:20,804 DEBUG [RS:0;f4e784dc7cb5:41507 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs 2024-12-09T14:27:20,804 INFO [RS:0;f4e784dc7cb5:41507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C41507%2C1733754397420:(num 1733754435350) 2024-12-09T14:27:20,804 DEBUG [RS:0;f4e784dc7cb5:41507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:20,804 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:27:20,804 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:27:20,805 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T14:27:20,805 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:27:20,805 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T14:27:20,805 INFO [RS:0;f4e784dc7cb5:41507 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41507 2024-12-09T14:27:20,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,41507,1733754397420 2024-12-09T14:27:20,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:27:20,808 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:27:20,809 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,41507,1733754397420] 2024-12-09T14:27:20,810 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,41507,1733754397420 already deleted, retry=false 2024-12-09T14:27:20,810 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,41507,1733754397420 expired; onlineServers=0 2024-12-09T14:27:20,810 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f4e784dc7cb5,34437,1733754397363' ***** 2024-12-09T14:27:20,810 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T14:27:20,810 INFO [M:0;f4e784dc7cb5:34437 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:27:20,811 INFO [M:0;f4e784dc7cb5:34437 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:27:20,811 DEBUG [M:0;f4e784dc7cb5:34437 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T14:27:20,811 DEBUG [M:0;f4e784dc7cb5:34437 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T14:27:20,811 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T14:27:20,811 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754397659 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754397659,5,FailOnTimeoutGroup] 2024-12-09T14:27:20,811 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754397659 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754397659,5,FailOnTimeoutGroup] 2024-12-09T14:27:20,811 INFO [M:0;f4e784dc7cb5:34437 {}] hbase.ChoreService(370): Chore service for: master/f4e784dc7cb5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T14:27:20,811 INFO [M:0;f4e784dc7cb5:34437 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:27:20,811 DEBUG [M:0;f4e784dc7cb5:34437 {}] master.HMaster(1795): Stopping service threads 2024-12-09T14:27:20,811 INFO [M:0;f4e784dc7cb5:34437 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T14:27:20,811 INFO [M:0;f4e784dc7cb5:34437 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:27:20,812 INFO [M:0;f4e784dc7cb5:34437 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T14:27:20,812 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T14:27:20,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T14:27:20,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:20,813 DEBUG [M:0;f4e784dc7cb5:34437 {}] zookeeper.ZKUtil(347): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T14:27:20,813 WARN [M:0;f4e784dc7cb5:34437 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T14:27:20,813 INFO [M:0;f4e784dc7cb5:34437 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/.lastflushedseqids 2024-12-09T14:27:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741904_1090 (size=130) 2024-12-09T14:27:20,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741904_1090 (size=130) 2024-12-09T14:27:20,819 INFO [M:0;f4e784dc7cb5:34437 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T14:27:20,819 INFO [M:0;f4e784dc7cb5:34437 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T14:27:20,820 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:27:20,820 INFO [M:0;f4e784dc7cb5:34437 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:20,820 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:20,820 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:27:20,820 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:20,820 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-12-09T14:27:20,840 DEBUG [M:0;f4e784dc7cb5:34437 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f989a382c8ce4ed386a969866ed1dd16 is 82, key is hbase:meta,,1/info:regioninfo/1733754398312/Put/seqid=0 2024-12-09T14:27:20,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741905_1091 (size=5672) 2024-12-09T14:27:20,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741905_1091 (size=5672) 2024-12-09T14:27:20,847 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f989a382c8ce4ed386a969866ed1dd16 2024-12-09T14:27:20,874 DEBUG [M:0;f4e784dc7cb5:34437 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b77c147a5f64b2a8f10e7e8e283c89a is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733754398954/Put/seqid=0 2024-12-09T14:27:20,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741906_1092 (size=6256) 2024-12-09T14:27:20,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741906_1092 (size=6256) 2024-12-09T14:27:20,885 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b77c147a5f64b2a8f10e7e8e283c89a 2024-12-09T14:27:20,890 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b77c147a5f64b2a8f10e7e8e283c89a 2024-12-09T14:27:20,909 DEBUG [M:0;f4e784dc7cb5:34437 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cecd4ffda47043e28cf908cce845a7a3 is 69, key is f4e784dc7cb5,41215,1733754398471/rs:state/1733754398514/Put/seqid=0 2024-12-09T14:27:20,909 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:20,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41507-0x1012b9432be0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:20,909 INFO [RS:0;f4e784dc7cb5:41507 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:27:20,909 INFO [RS:0;f4e784dc7cb5:41507 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,41507,1733754397420; zookeeper connection closed. 2024-12-09T14:27:20,910 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@663cc6df {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@663cc6df 2024-12-09T14:27:20,910 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-09T14:27:20,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741907_1093 (size=5224) 2024-12-09T14:27:20,916 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cecd4ffda47043e28cf908cce845a7a3 2024-12-09T14:27:20,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741907_1093 (size=5224) 2024-12-09T14:27:20,948 DEBUG [M:0;f4e784dc7cb5:34437 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eb23d0ee8264f779a736292e34d5bb3 is 52, key is load_balancer_on/state:d/1733754398454/Put/seqid=0 2024-12-09T14:27:20,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741908_1094 (size=5056) 2024-12-09T14:27:20,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741908_1094 (size=5056) 2024-12-09T14:27:20,955 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eb23d0ee8264f779a736292e34d5bb3 2024-12-09T14:27:20,961 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f989a382c8ce4ed386a969866ed1dd16 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f989a382c8ce4ed386a969866ed1dd16 2024-12-09T14:27:20,967 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f989a382c8ce4ed386a969866ed1dd16, entries=8, sequenceid=60, filesize=5.5 K 2024-12-09T14:27:20,968 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b77c147a5f64b2a8f10e7e8e283c89a as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b77c147a5f64b2a8f10e7e8e283c89a 2024-12-09T14:27:20,973 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b77c147a5f64b2a8f10e7e8e283c89a 2024-12-09T14:27:20,973 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b77c147a5f64b2a8f10e7e8e283c89a, entries=6, sequenceid=60, filesize=6.1 K 2024-12-09T14:27:20,975 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cecd4ffda47043e28cf908cce845a7a3 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cecd4ffda47043e28cf908cce845a7a3 2024-12-09T14:27:20,980 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cecd4ffda47043e28cf908cce845a7a3, entries=2, sequenceid=60, filesize=5.1 K 2024-12-09T14:27:20,981 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4eb23d0ee8264f779a736292e34d5bb3 as hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4eb23d0ee8264f779a736292e34d5bb3 2024-12-09T14:27:20,987 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4eb23d0ee8264f779a736292e34d5bb3, entries=1, sequenceid=60, filesize=4.9 K 2024-12-09T14:27:20,989 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=60, compaction requested=false 2024-12-09T14:27:20,999 INFO [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T14:27:20,999 DEBUG [M:0;f4e784dc7cb5:34437 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754440820Disabling compacts and flushes for region at 1733754440820Disabling writes for close at 1733754440820Obtaining lock to block concurrent updates at 1733754440820Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733754440820Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1733754440821 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733754440821Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733754440821Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733754440840 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733754440840Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733754440854 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733754440873 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733754440873Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733754440890 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733754440908 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733754440908Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733754440925 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733754440947 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733754440947Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7973d6c2: reopening flushed file at 1733754440960 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bc67302: reopening flushed file at 1733754440967 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42381745: reopening flushed file at 1733754440974 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e46f24a: reopening flushed file at 1733754440981 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=60, compaction requested=false at 1733754440989 (+8 ms)Writing region close event to WAL at 1733754440999 (+10 ms)Closed at 1733754440999 2024-12-09T14:27:21,000 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:21,000 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:21,000 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:21,000 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:21,000 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:21,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33181 is added to blk_1073741890_1073 (size=1045) 2024-12-09T14:27:21,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46841 is added to blk_1073741890_1073 (size=1045) 2024-12-09T14:27:21,180 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:27:21,191 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,193 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:21,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:21,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:21,855 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2f09a32e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1955786853-172.17.0.3-1733754396514:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:38909,null,null]) java.net.ConnectException: Call From f4e784dc7cb5/172.17.0.3 to localhost:39227 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T14:27:22,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:22,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:22,683 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/WALs/f4e784dc7cb5,34437,1733754397363/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/oldWALs/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 2024-12-09T14:27:22,688 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/MasterData/oldWALs/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547 to hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/oldWALs/f4e784dc7cb5%2C34437%2C1733754397363.1733754397547$masterlocalwal$ 2024-12-09T14:27:22,688 INFO [M:0;f4e784dc7cb5:34437 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T14:27:22,688 INFO [M:0;f4e784dc7cb5:34437 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34437 2024-12-09T14:27:22,688 INFO [M:0;f4e784dc7cb5:34437 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:27:22,689 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T14:27:22,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:22,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34437-0x1012b9432be0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:22,792 INFO [M:0;f4e784dc7cb5:34437 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:27:22,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c1aa502{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:22,823 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6dc86a20{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:22,823 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:22,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6869cf12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:22,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2656efd1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:22,826 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:27:22,826 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:27:22,826 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:22,826 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1955786853-172.17.0.3-1733754396514 (Datanode Uuid e2ff4de6-f4cd-4aa4-be32-99f0cc2435d9) service to localhost/127.0.0.1:43639 2024-12-09T14:27:22,827 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data3/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:22,827 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data4/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:22,827 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6ec90977 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38909,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39227 , LocalHost:localPort f4e784dc7cb5/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-09T14:27:22,827 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6ec90977 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1955786853-172.17.0.3-1733754396514:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:46841,null,null], DatanodeInfoWithStorage[127.0.0.1:38909,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1955786853-172.17.0.3-1733754396514 2024-12-09T14:27:22,827 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:22,827 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6ec90977 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38909,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1955786853-172.17.0.3-1733754396514 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:22,828 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6ec90977 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46841,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1955786853-172.17.0.3-1733754396514 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:22,828 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6ec90977 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38909,null,null], DatanodeInfoWithStorage[127.0.0.1:46841,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1955786853-172.17.0.3-1733754396514:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38909,null,null], DatanodeInfoWithStorage[127.0.0.1:46841,null,null]] 2024-12-09T14:27:22,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58872ac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:22,844 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3afd309b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:22,844 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:22,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@83c4e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:22,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2281f2b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:22,848 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:27:22,848 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:22,850 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:27:22,850 WARN [BP-1955786853-172.17.0.3-1733754396514 heartbeating to localhost/127.0.0.1:43639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1955786853-172.17.0.3-1733754396514 (Datanode Uuid c3f00b85-7422-48b8-a33d-bdb3efe0585e) service to localhost/127.0.0.1:43639 2024-12-09T14:27:22,851 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data7/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:22,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/cluster_272ea0ce-296a-b46d-2f51-dda5d9c3b989/data/data8/current/BP-1955786853-172.17.0.3-1733754396514 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:22,852 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:22,866 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77cd23f0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:27:22,867 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3ed3a961{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:22,867 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:22,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@142d24a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:22,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e7d5546{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:22,886 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T14:27:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T14:27:22,949 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37339 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:43639 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:43639 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:37339 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43639 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43639 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43639 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43639 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007fab7cbf5e20.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007fab7cbf5e20.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43639 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=147 (was 163), ProcessCount=11 (was 11), AvailableMemoryMB=5984 (was 5709) - AvailableMemoryMB LEAK? - 2024-12-09T14:27:22,960 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=147, ProcessCount=11, AvailableMemoryMB=5984 2024-12-09T14:27:22,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T14:27:22,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.log.dir so I do NOT create it in target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b 2024-12-09T14:27:22,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/71ffa0a2-8f06-2e27-3ba0-39f10656ceba/hadoop.tmp.dir so I do NOT create it in target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b 2024-12-09T14:27:22,960 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b, deleteOnExit=true 2024-12-09T14:27:22,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T14:27:22,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/test.cache.data in system properties and HBase conf 2024-12-09T14:27:22,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T14:27:22,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir in system properties and HBase conf 2024-12-09T14:27:22,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T14:27:22,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T14:27:22,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T14:27:22,962 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T14:27:22,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:27:22,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:27:22,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T14:27:22,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/nfs.dump.dir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:27:22,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T14:27:22,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T14:27:22,981 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:27:23,062 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:23,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:23,075 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:23,075 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:23,075 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:27:23,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:23,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@594180cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:23,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73ee6be8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:23,234 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49c2791f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir/jetty-localhost-46373-hadoop-hdfs-3_4_1-tests_jar-_-any-15137452081826315759/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:27:23,235 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65e5bfc{HTTP/1.1, (http/1.1)}{localhost:46373} 2024-12-09T14:27:23,235 INFO [Time-limited test {}] server.Server(415): Started @151093ms 2024-12-09T14:27:23,251 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:27:23,360 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:23,365 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:23,367 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:23,367 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:23,367 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:27:23,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@482b9b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:23,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f0827f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:23,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d9c9e99{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir/jetty-localhost-37607-hadoop-hdfs-3_4_1-tests_jar-_-any-14140576708479563660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:23,535 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5493c38f{HTTP/1.1, (http/1.1)}{localhost:37607} 2024-12-09T14:27:23,536 INFO [Time-limited test {}] server.Server(415): Started @151394ms 2024-12-09T14:27:23,538 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:27:23,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:23,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:23,623 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:23,629 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:23,639 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:23,639 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:23,639 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:27:23,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ea97727{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:23,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2efee71d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:23,697 WARN [Thread-1182 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data1/current/BP-24484884-172.17.0.3-1733754442999/current, will proceed with Du for space computation calculation, 2024-12-09T14:27:23,707 WARN [Thread-1183 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data2/current/BP-24484884-172.17.0.3-1733754442999/current, will proceed with Du for space computation calculation, 2024-12-09T14:27:23,744 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:27:23,749 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x89f4888f95007a43 with lease ID 0x9276d1f5412f34bd: Processing first storage report for DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e from datanode DatanodeRegistration(127.0.0.1:33915, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=41207, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999) 2024-12-09T14:27:23,749 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89f4888f95007a43 with lease ID 0x9276d1f5412f34bd: from storage DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e node DatanodeRegistration(127.0.0.1:33915, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=41207, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:23,749 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x89f4888f95007a43 with lease ID 0x9276d1f5412f34bd: Processing first storage report for DS-7af7881e-c735-44fc-b259-2b7dc24ee1ae from datanode DatanodeRegistration(127.0.0.1:33915, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=41207, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999) 2024-12-09T14:27:23,750 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x89f4888f95007a43 with lease ID 0x9276d1f5412f34bd: from storage DS-7af7881e-c735-44fc-b259-2b7dc24ee1ae node DatanodeRegistration(127.0.0.1:33915, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=41207, infoSecurePort=0, ipcPort=42963, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T14:27:23,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18940221{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir/jetty-localhost-33769-hadoop-hdfs-3_4_1-tests_jar-_-any-2942692999949742648/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:23,790 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c6cb23c{HTTP/1.1, (http/1.1)}{localhost:33769} 2024-12-09T14:27:23,791 INFO [Time-limited test {}] server.Server(415): Started @151648ms 2024-12-09T14:27:23,792 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T14:27:23,907 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data4/current/BP-24484884-172.17.0.3-1733754442999/current, will proceed with Du for space computation calculation, 2024-12-09T14:27:23,907 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data3/current/BP-24484884-172.17.0.3-1733754442999/current, will proceed with Du for space computation calculation, 2024-12-09T14:27:23,936 WARN [Thread-1197 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:27:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe8163f77531268ca with lease ID 0x9276d1f5412f34be: Processing first storage report for DS-68ed176b-cc2f-4055-bdb2-972a31e67590 from datanode DatanodeRegistration(127.0.0.1:36437, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=41001, infoSecurePort=0, ipcPort=37197, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999) 2024-12-09T14:27:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe8163f77531268ca with lease ID 0x9276d1f5412f34be: from storage DS-68ed176b-cc2f-4055-bdb2-972a31e67590 node DatanodeRegistration(127.0.0.1:36437, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=41001, infoSecurePort=0, ipcPort=37197, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe8163f77531268ca with lease ID 0x9276d1f5412f34be: Processing first storage report for DS-de1175f2-b1b7-4524-9740-cac2fe07ff60 from datanode DatanodeRegistration(127.0.0.1:36437, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=41001, infoSecurePort=0, ipcPort=37197, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999) 2024-12-09T14:27:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe8163f77531268ca with lease ID 0x9276d1f5412f34be: from storage DS-de1175f2-b1b7-4524-9740-cac2fe07ff60 node DatanodeRegistration(127.0.0.1:36437, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=41001, infoSecurePort=0, ipcPort=37197, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:24,027 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b 2024-12-09T14:27:24,030 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/zookeeper_0, clientPort=52626, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T14:27:24,031 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52626 2024-12-09T14:27:24,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:24,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:24,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:27:24,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:27:24,045 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a with version=8 2024-12-09T14:27:24,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase-staging 2024-12-09T14:27:24,048 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:27:24,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:27:24,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:27:24,048 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:27:24,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:27:24,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:27:24,048 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T14:27:24,049 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:27:24,050 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39993 2024-12-09T14:27:24,052 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39993 connecting to ZooKeeper ensemble=127.0.0.1:52626 2024-12-09T14:27:24,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:399930x0, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:27:24,059 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39993-0x1012b94e91d0000 connected 2024-12-09T14:27:24,082 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:24,084 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:24,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:27:24,091 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a, hbase.cluster.distributed=false 2024-12-09T14:27:24,094 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:27:24,097 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39993 2024-12-09T14:27:24,097 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39993 2024-12-09T14:27:24,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39993 2024-12-09T14:27:24,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39993 2024-12-09T14:27:24,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39993 2024-12-09T14:27:24,126 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:27:24,126 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:27:24,126 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:27:24,126 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:27:24,126 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:27:24,126 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:27:24,126 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:27:24,127 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:27:24,128 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40335 2024-12-09T14:27:24,130 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40335 connecting to ZooKeeper ensemble=127.0.0.1:52626 2024-12-09T14:27:24,131 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:24,133 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:24,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:403350x0, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:27:24,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:403350x0, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:27:24,140 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T14:27:24,141 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40335-0x1012b94e91d0001 connected 2024-12-09T14:27:24,143 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T14:27:24,144 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T14:27:24,145 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:27:24,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40335 2024-12-09T14:27:24,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40335 2024-12-09T14:27:24,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40335 2024-12-09T14:27:24,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40335 2024-12-09T14:27:24,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40335 2024-12-09T14:27:24,164 
DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f4e784dc7cb5:39993 2024-12-09T14:27:24,164 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:24,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:27:24,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:27:24,173 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:24,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T14:27:24,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:24,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:24,176 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T14:27:24,177 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f4e784dc7cb5,39993,1733754444047 from backup master directory 2024-12-09T14:27:24,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:24,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:27:24,178 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T14:27:24,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:27:24,178 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:24,184 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/hbase.id] with ID: 6d5de013-1a40-41bd-8452-67e210ec1a80 2024-12-09T14:27:24,184 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/.tmp/hbase.id 2024-12-09T14:27:24,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:27:24,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:27:24,599 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/.tmp/hbase.id]:[hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/hbase.id] 2024-12-09T14:27:24,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:24,614 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:24,615 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T14:27:24,618 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 2024-12-09T14:27:24,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:24,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:24,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:24,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:27:24,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:27:25,031 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:27:25,032 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T14:27:25,032 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:27:25,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:27:25,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:27:25,444 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store 2024-12-09T14:27:25,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:27:25,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:27:25,455 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:27:25,455 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:27:25,456 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:25,456 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:25,456 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:27:25,456 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:25,456 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T14:27:25,456 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754445455Disabling compacts and flushes for region at 1733754445455Disabling writes for close at 1733754445456 (+1 ms)Writing region close event to WAL at 1733754445456Closed at 1733754445456 2024-12-09T14:27:25,457 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/.initializing 2024-12-09T14:27:25,457 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:25,461 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C39993%2C1733754444047, suffix=, logDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047, archiveDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/oldWALs, maxLogs=10 2024-12-09T14:27:25,462 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 2024-12-09T14:27:25,467 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 2024-12-09T14:27:25,471 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41207:41207),(127.0.0.1/127.0.0.1:41001:41001)] 2024-12-09T14:27:25,473 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:27:25,473 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:27:25,473 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,473 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T14:27:25,478 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:25,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T14:27:25,480 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:27:25,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T14:27:25,482 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:27:25,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T14:27:25,484 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:27:25,485 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,486 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,486 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,489 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,489 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,490 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T14:27:25,491 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:27:25,495 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:27:25,495 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840653, jitterRate=0.06894652545452118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T14:27:25,497 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733754445474Initializing all the Stores at 1733754445475 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754445475Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754445476 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754445476Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754445476Cleaning up temporary data from old regions at 1733754445489 (+13 ms)Region opened successfully at 1733754445497 (+8 ms) 2024-12-09T14:27:25,497 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T14:27:25,501 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12d705eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:27:25,502 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T14:27:25,502 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T14:27:25,502 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T14:27:25,503 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T14:27:25,508 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 5 msec 2024-12-09T14:27:25,509 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T14:27:25,509 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T14:27:25,511 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T14:27:25,512 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T14:27:25,513 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T14:27:25,514 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T14:27:25,515 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T14:27:25,517 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T14:27:25,517 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T14:27:25,519 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T14:27:25,521 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T14:27:25,522 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T14:27:25,523 DEBUG 
[master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T14:27:25,525 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T14:27:25,526 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T14:27:25,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:27:25,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:27:25,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:25,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:25,530 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f4e784dc7cb5,39993,1733754444047, sessionid=0x1012b94e91d0000, setting cluster-up flag (Was=false) 2024-12-09T14:27:25,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:25,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:25,540 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T14:27:25,542 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:25,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:25,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:25,553 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T14:27:25,555 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:25,557 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T14:27:25,559 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T14:27:25,559 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T14:27:25,559 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T14:27:25,559 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f4e784dc7cb5,39993,1733754444047 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T14:27:25,560 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:27:25,560 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:27:25,560 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:27:25,560 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:27:25,561 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f4e784dc7cb5:0, corePoolSize=10, maxPoolSize=10 2024-12-09T14:27:25,561 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,561 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:27:25,561 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733754475563 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T14:27:25,563 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,564 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T14:27:25,564 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T14:27:25,564 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T14:27:25,564 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T14:27:25,564 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:27:25,564 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T14:27:25,564 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T14:27:25,566 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,566 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T14:27:25,566 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754445564,5,FailOnTimeoutGroup] 2024-12-09T14:27:25,570 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754445566,5,FailOnTimeoutGroup] 2024-12-09T14:27:25,570 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,570 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T14:27:25,570 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,571 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T14:27:25,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:27:25,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:27:25,587 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T14:27:25,587 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a 2024-12-09T14:27:25,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:25,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:27:25,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:27:25,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:27:25,616 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:27:25,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:27:25,619 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:25,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:25,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:27:25,622 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:27:25,622 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:25,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:27:25,625 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:27:25,625 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:25,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:27:25,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:27:25,627 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:25,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:25,627 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:27:25,628 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740 2024-12-09T14:27:25,628 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740 2024-12-09T14:27:25,630 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:27:25,630 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:27:25,631 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T14:27:25,634 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:27:25,638 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:27:25,639 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713530, jitterRate=-0.09270024299621582}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:27:25,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733754445614Initializing all the Stores at 1733754445615 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754445616 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754445616Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754445616Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754445616Cleaning up temporary data from old regions at 1733754445630 (+14 ms)Region opened successfully at 1733754445640 (+10 ms) 2024-12-09T14:27:25,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:27:25,640 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:27:25,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:27:25,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:27:25,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:27:25,642 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:27:25,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754445640Disabling compacts and flushes for region at 1733754445640Disabling writes for close at 1733754445640Writing 
region close event to WAL at 1733754445642 (+2 ms)Closed at 1733754445642 2024-12-09T14:27:25,644 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:27:25,644 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T14:27:25,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T14:27:25,645 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:27:25,647 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T14:27:25,659 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(746): ClusterId : 6d5de013-1a40-41bd-8452-67e210ec1a80 2024-12-09T14:27:25,659 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:27:25,662 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:27:25,662 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:27:25,664 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:27:25,665 DEBUG [RS:0;f4e784dc7cb5:40335 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e3b6b7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:27:25,681 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f4e784dc7cb5:40335 2024-12-09T14:27:25,681 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:27:25,681 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:27:25,681 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T14:27:25,682 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,39993,1733754444047 with port=40335, startcode=1733754444125 2024-12-09T14:27:25,682 DEBUG [RS:0;f4e784dc7cb5:40335 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:27:25,695 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47921, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:27:25,696 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39993 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:25,696 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39993 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:25,699 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a 2024-12-09T14:27:25,699 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39131 2024-12-09T14:27:25,699 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:27:25,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:27:25,702 DEBUG [RS:0;f4e784dc7cb5:40335 {}] zookeeper.ZKUtil(111): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:25,702 WARN [RS:0;f4e784dc7cb5:40335 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:27:25,702 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,40335,1733754444125] 2024-12-09T14:27:25,702 INFO [RS:0;f4e784dc7cb5:40335 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:27:25,702 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:25,714 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:27:25,719 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:27:25,727 INFO [RS:0;f4e784dc7cb5:40335 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:27:25,727 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T14:27:25,727 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:27:25,728 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:27:25,728 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,728 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,728 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,728 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,728 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,728 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:27:25,729 DEBUG [RS:0;f4e784dc7cb5:40335 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:27:25,734 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T14:27:25,734 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,734 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,735 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,735 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,735 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,40335,1733754444125-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:27:25,752 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:27:25,752 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,40335,1733754444125-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,752 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,753 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.Replication(171): f4e784dc7cb5,40335,1733754444125 started 2024-12-09T14:27:25,769 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:25,769 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,40335,1733754444125, RpcServer on f4e784dc7cb5/172.17.0.3:40335, sessionid=0x1012b94e91d0001 2024-12-09T14:27:25,770 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:27:25,770 DEBUG [RS:0;f4e784dc7cb5:40335 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:25,770 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,40335,1733754444125' 2024-12-09T14:27:25,770 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:27:25,770 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:27:25,771 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:27:25,771 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:27:25,771 DEBUG [RS:0;f4e784dc7cb5:40335 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:25,771 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,40335,1733754444125' 2024-12-09T14:27:25,771 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:27:25,771 DEBUG 
[RS:0;f4e784dc7cb5:40335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:27:25,772 DEBUG [RS:0;f4e784dc7cb5:40335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:27:25,772 INFO [RS:0;f4e784dc7cb5:40335 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:27:25,772 INFO [RS:0;f4e784dc7cb5:40335 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T14:27:25,797 WARN [f4e784dc7cb5:39993 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T14:27:25,875 INFO [RS:0;f4e784dc7cb5:40335 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C40335%2C1733754444125, suffix=, logDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125, archiveDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/oldWALs, maxLogs=32 2024-12-09T14:27:25,876 INFO [RS:0;f4e784dc7cb5:40335 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:25,894 INFO [RS:0;f4e784dc7cb5:40335 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:25,896 DEBUG [RS:0;f4e784dc7cb5:40335 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41001:41001),(127.0.0.1/127.0.0.1:41207:41207)] 2024-12-09T14:27:26,047 DEBUG [f4e784dc7cb5:39993 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T14:27:26,048 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:26,050 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,40335,1733754444125, state=OPENING 2024-12-09T14:27:26,051 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T14:27:26,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:26,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:26,053 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:27:26,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:27:26,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:27:26,053 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,40335,1733754444125}] 2024-12-09T14:27:26,209 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T14:27:26,217 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47901, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T14:27:26,227 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T14:27:26,227 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:27:26,229 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C40335%2C1733754444125.meta, suffix=.meta, logDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125, archiveDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/oldWALs, maxLogs=32 2024-12-09T14:27:26,230 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta 2024-12-09T14:27:26,243 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta 2024-12-09T14:27:26,269 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41001:41001),(127.0.0.1/127.0.0.1:41207:41207)] 2024-12-09T14:27:26,270 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:27:26,271 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T14:27:26,271 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T14:27:26,271 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T14:27:26,271 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T14:27:26,271 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:27:26,271 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T14:27:26,271 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T14:27:26,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:27:26,274 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:27:26,274 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:26,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:26,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:27:26,275 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:27:26,275 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:26,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:26,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:27:26,276 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:27:26,276 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:26,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:27:26,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:27:26,279 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:27:26,279 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:26,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T14:27:26,279 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:27:26,280 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740 2024-12-09T14:27:26,282 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740 2024-12-09T14:27:26,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:27:26,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:27:26,284 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:27:26,285 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:27:26,286 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822167, jitterRate=0.04544016718864441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:27:26,286 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T14:27:26,287 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733754446271Writing region info on filesystem at 1733754446271Initializing all the Stores at 1733754446272 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754446272Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754446272Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754446272Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754446273 (+1 ms)Cleaning up temporary data from old regions at 1733754446283 (+10 ms)Running coprocessor post-open hooks at 1733754446286 (+3 ms)Region opened successfully at 1733754446287 (+1 ms) 2024-12-09T14:27:26,288 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733754446209 2024-12-09T14:27:26,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T14:27:26,291 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T14:27:26,292 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:26,293 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,40335,1733754444125, state=OPEN 2024-12-09T14:27:26,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:27:26,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:27:26,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:27:26,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:27:26,298 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:26,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T14:27:26,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,40335,1733754444125 in 245 msec 2024-12-09T14:27:26,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T14:27:26,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 658 msec 2024-12-09T14:27:26,306 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:27:26,306 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T14:27:26,307 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:27:26,308 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,40335,1733754444125, seqNum=-1] 2024-12-09T14:27:26,308 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:27:26,310 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37911, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:27:26,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 757 msec 2024-12-09T14:27:26,316 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733754446316, completionTime=-1 2024-12-09T14:27:26,316 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T14:27:26,316 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T14:27:26,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T14:27:26,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733754506318 2024-12-09T14:27:26,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733754566318 2024-12-09T14:27:26,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T14:27:26,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,39993,1733754444047-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:26,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,39993,1733754444047-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:26,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,39993,1733754444047-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:26,319 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f4e784dc7cb5:39993, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:27:26,319 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:26,320 DEBUG [master/f4e784dc7cb5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T14:27:26,322 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T14:27:26,326 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.148sec 2024-12-09T14:27:26,326 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T14:27:26,326 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T14:27:26,326 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T14:27:26,326 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T14:27:26,326 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T14:27:26,326 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,39993,1733754444047-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:27:26,327 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,39993,1733754444047-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T14:27:26,329 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T14:27:26,329 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T14:27:26,329 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,39993,1733754444047-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
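The lines that follow show the Time-limited test thread building a client connection to the mini-cluster: it constructs an RPC client (KeyValueCodec, SIMPLE auth), asks the connection registry for the cluster id, and then fetches the hbase:meta region location. A minimal client-side sketch using the standard HBase client API is shown below; the quorum address is illustrative, and the test itself wires the connection to the mini-cluster's own registry rather than a hand-set quorum.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ClientConnectSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative quorum; the log shows the mini-cluster ZooKeeper at 127.0.0.1:52626.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Comparable to "Start fetching meta region location from registry" /
            // "The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]".
            System.out.println(locator.getRegionLocation(new byte[0]));
        }
    }
}
```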
2024-12-09T14:27:26,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5921b3be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:27:26,358 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f4e784dc7cb5,39993,-1 for getting cluster id 2024-12-09T14:27:26,358 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T14:27:26,368 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6d5de013-1a40-41bd-8452-67e210ec1a80' 2024-12-09T14:27:26,369 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T14:27:26,369 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6d5de013-1a40-41bd-8452-67e210ec1a80" 2024-12-09T14:27:26,369 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22d5e78c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:27:26,369 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f4e784dc7cb5,39993,-1] 2024-12-09T14:27:26,370 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T14:27:26,370 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:26,372 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48548, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T14:27:26,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37687418, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:27:26,373 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:27:26,374 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,40335,1733754444125, seqNum=-1] 2024-12-09T14:27:26,375 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:27:26,384 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59244, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:27:26,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:26,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:27:26,391 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T14:27:26,391 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-09T14:27:26,391 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-09T14:27:26,392 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T14:27:26,393 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:26,393 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3777a673 2024-12-09T14:27:26,393 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T14:27:26,396 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48560, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T14:27:26,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39993 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T14:27:26,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39993 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
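The two TableDescriptorChecker warnings just above fire because the table about to be created runs with a tiny region max file size (786432 bytes) and memstore flush size (8192 bytes), values this test relies on so that flushes and log rolls happen quickly. A minimal sketch of building an equivalent descriptor with the public client API follows; it mirrors the 'info' family printed by HMaster$4(2454) but is not the test's own setup code, and the limits could equally come from "hbase.hregion.max.filesize" / "hbase.hregion.memstore.flush.size" in the cluster configuration.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
    static void createTable(Admin admin) throws Exception {
        // Single 'info' family: 1 version, ROW bloom filter, 64 KB blocks,
        // matching the descriptor logged for the create request.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setColumnFamily(info)
            // Deliberately small limits; these are what trigger the MAX_FILESIZE and
            // MEMSTORE_FLUSHSIZE warnings from TableDescriptorChecker(321).
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .build();

        admin.createTable(td);
    }
}
```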
2024-12-09T14:27:26,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39993 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:27:26,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39993 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T14:27:26,402 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T14:27:26,402 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:26,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39993 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-09T14:27:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:27:26,407 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T14:27:26,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741835_1011 (size=395) 2024-12-09T14:27:26,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741835_1011 (size=395) 2024-12-09T14:27:26,427 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e31851bebb57e7b89e7d932d84ee55a4, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a 2024-12-09T14:27:26,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36437 is added to blk_1073741836_1012 (size=78) 2024-12-09T14:27:26,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33915 is added to blk_1073741836_1012 (size=78) 2024-12-09T14:27:26,446 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:27:26,446 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing e31851bebb57e7b89e7d932d84ee55a4, disabling compactions & flushes 2024-12-09T14:27:26,446 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:26,446 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:26,446 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. after waiting 0 ms 2024-12-09T14:27:26,446 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:26,446 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:26,446 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for e31851bebb57e7b89e7d932d84ee55a4: Waiting for close lock at 1733754446446Disabling compacts and flushes for region at 1733754446446Disabling writes for close at 1733754446446Writing region close event to WAL at 1733754446446Closed at 1733754446446 2024-12-09T14:27:26,449 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T14:27:26,449 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733754446449"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733754446449"}]},"ts":"1733754446449"} 2024-12-09T14:27:26,454 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
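The assignment steps that follow finish the CreateTableProcedure, but interleaved with them a WARN from Close-WAL-Writer-0 starts repeating roughly once per second: lease recovery keeps being retried on two WAL files left over from an earlier cluster instance (hdfs://localhost:43639, servers 41215 and 41507), and every attempt fails inside RecoverLeaseFSUtils.isFileClosed because the DFSClient behind that FileSystem reference has already been shut down, hence the wrapped "java.io.IOException: Filesystem closed". Most likely the retry loop still holds a FileSystem instance that was closed when the previous mini-cluster went down. A minimal sketch reproducing that failure mode directly against the HDFS client API (URI and path are illustrative):

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative NameNode address; the log shows hdfs://localhost:43639.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:43639"), conf);

        Path wal = new Path("/user/jenkins/some-old.wal"); // illustrative path

        dfs.close(); // once the underlying DFSClient is closed...

        try {
            // ...DFSClient.checkOpen() rejects the call, which is exactly the
            // "Caused by: java.io.IOException: Filesystem closed" seen in the stack traces.
            dfs.isFileClosed(wal);
        } catch (IOException expected) {
            System.out.println(expected.getMessage()); // "Filesystem closed"
        }
    }
}
```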
2024-12-09T14:27:26,456 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T14:27:26,456 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754446456"}]},"ts":"1733754446456"} 2024-12-09T14:27:26,459 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-09T14:27:26,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=e31851bebb57e7b89e7d932d84ee55a4, ASSIGN}] 2024-12-09T14:27:26,462 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=e31851bebb57e7b89e7d932d84ee55a4, ASSIGN 2024-12-09T14:27:26,463 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=e31851bebb57e7b89e7d932d84ee55a4, ASSIGN; state=OFFLINE, location=f4e784dc7cb5,40335,1733754444125; forceNewPlan=false, retain=false 2024-12-09T14:27:26,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:26,615 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e31851bebb57e7b89e7d932d84ee55a4, regionState=OPENING, regionLocation=f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:26,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=e31851bebb57e7b89e7d932d84ee55a4, ASSIGN because future has completed 2024-12-09T14:27:26,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e31851bebb57e7b89e7d932d84ee55a4, server=f4e784dc7cb5,40335,1733754444125}] 2024-12-09T14:27:26,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:26,783 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:26,783 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e31851bebb57e7b89e7d932d84ee55a4, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:27:26,784 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,784 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:27:26,784 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,784 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,786 INFO [StoreOpener-e31851bebb57e7b89e7d932d84ee55a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,788 INFO [StoreOpener-e31851bebb57e7b89e7d932d84ee55a4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e31851bebb57e7b89e7d932d84ee55a4 columnFamilyName info 2024-12-09T14:27:26,788 DEBUG [StoreOpener-e31851bebb57e7b89e7d932d84ee55a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:27:26,790 INFO [StoreOpener-e31851bebb57e7b89e7d932d84ee55a4-1 {}] regionserver.HStore(327): Store=e31851bebb57e7b89e7d932d84ee55a4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:27:26,790 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,791 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,792 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,792 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,792 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,795 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,798 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:27:26,799 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e31851bebb57e7b89e7d932d84ee55a4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782028, jitterRate=-0.005600467324256897}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T14:27:26,799 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:26,800 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e31851bebb57e7b89e7d932d84ee55a4: Running coprocessor pre-open hook at 1733754446784Writing region info on filesystem at 1733754446784Initializing all the Stores at 1733754446785 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754446786 (+1 ms)Cleaning up temporary data from old regions at 1733754446792 (+6 ms)Running coprocessor post-open hooks at 1733754446799 (+7 ms)Region opened successfully at 1733754446800 (+1 ms) 2024-12-09T14:27:26,801 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for 
TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4., pid=6, masterSystemTime=1733754446774 2024-12-09T14:27:26,807 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:26,807 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:26,809 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e31851bebb57e7b89e7d932d84ee55a4, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:26,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e31851bebb57e7b89e7d932d84ee55a4, server=f4e784dc7cb5,40335,1733754444125 because future has completed 2024-12-09T14:27:26,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T14:27:26,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e31851bebb57e7b89e7d932d84ee55a4, server=f4e784dc7cb5,40335,1733754444125 in 195 msec 2024-12-09T14:27:26,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T14:27:26,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=e31851bebb57e7b89e7d932d84ee55a4, ASSIGN in 357 msec 2024-12-09T14:27:26,822 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T14:27:26,822 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754446822"}]},"ts":"1733754446822"} 2024-12-09T14:27:26,825 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-09T14:27:26,827 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T14:27:26,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 430 msec 2024-12-09T14:27:27,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:27,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:27,821 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T14:27:27,822 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T14:27:27,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T14:27:27,822 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-09T14:27:27,823 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:27:27,823 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T14:27:28,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:28,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:29,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:29,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:30,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:30,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:31,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:31,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:31,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:27:31,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:31,826 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T14:27:31,827 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-09T14:27:32,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:32,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:33,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:33,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:34,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:34,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:35,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:35,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:36,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39993 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:27:36,490 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-09T14:27:36,490 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-09T14:27:36,495 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T14:27:36,495 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:36,500 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4., hostname=f4e784dc7cb5,40335,1733754444125, seqNum=2] 2024-12-09T14:27:36,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:36,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:37,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:37,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:38,504 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:38,504 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:38,504 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
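[Editor's note] The repeated util.RecoverLeaseFSUtils(258) warnings above come from a close-WAL-writer thread that keeps probing, once per second, whether the old WAL file has been closed after its DFSClient was already shut down, which is why every probe fails with "Filesystem closed". Below is a minimal, hypothetical Java sketch of that kind of lease-recovery polling against the public DistributedFileSystem API. The namenode URI, path, and timeout are made up for illustration, and this is not the actual HBase RecoverLeaseFSUtils code (which invokes isFileClosed via reflection, hence the InvocationTargetException wrappers in the traces).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical namenode URI and WAL path, standing in for the
    // hdfs://localhost:43639/.../WALs/... file named in the log above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/WALs/old-wal-file");

    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("lease recovery only applies to HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Ask the NameNode to start lease recovery; true means the file is
    // already closed and there is nothing left to do.
    boolean closed = dfs.recoverLease(wal);

    // Otherwise poll isFileClosed(). Each failed probe here corresponds to
    // one "Failed invocation for ..." WARN line in the log; if the
    // underlying DFSClient has been closed, isFileClosed() throws
    // IOException("Filesystem closed") and the caller can only retry or give up.
    long deadline = System.currentTimeMillis() + 60_000L;
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);
      closed = dfs.isFileClosed(wal);
    }
    System.out.println("file closed: " + closed);
  }
}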
2024-12-09T14:27:38,504 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:36437,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:38,505 WARN [DataStreamer for file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 block BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK], DatanodeInfoWithStorage[127.0.0.1:36437,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36437,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]) is bad. 2024-12-09T14:27:38,505 WARN [DataStreamer for file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 block BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36437,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK], DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36437,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]) is bad. 2024-12-09T14:27:38,505 WARN [PacketResponder: BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36437] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:38,505 WARN [DataStreamer for file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta block BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36437,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK], DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36437,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]) is bad. 2024-12-09T14:27:38,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:38042 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38042 dst: /127.0.0.1:36437 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:38,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764759968_22 at /127.0.0.1:35552 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35552 dst: /127.0.0.1:33915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:38,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:35596 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35596 dst: /127.0.0.1:33915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:38,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764759968_22 at /127.0.0.1:37996 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37996 dst: /127.0.0.1:36437 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:38,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:38028 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38028 dst: /127.0.0.1:36437 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:38,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:35582 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35582 dst: /127.0.0.1:33915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
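[Editor's note] The ResponseProcessor EOFExceptions and DataXceiver "error processing WRITE_BLOCK" entries above are what a two-datanode write pipeline produces when one datanode is stopped mid-write: the client-side DataStreamer marks the dead node as bad and tries pipeline recovery. The sketch below is a hypothetical illustration of where such a failure surfaces to an ordinary HDFS writer; the URI and path are invented, while dfs.client.block.write.replace-datanode-on-failure.policy is a real HDFS client setting that small test clusters often relax because there is no spare datanode to substitute.

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineFailureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // With only two datanodes, the default replace-datanode-on-failure policy
    // may be impossible to satisfy, so tests commonly disable replacement.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");

    // Hypothetical namenode URI and file path.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    try (FSDataOutputStream out = fs.create(new Path("/example/wal-like-file"))) {
      out.write("edit-1".getBytes(StandardCharsets.UTF_8));
      out.hflush(); // pushes the packet through the datanode pipeline

      // If a pipeline datanode is stopped between flushes, the next write or
      // hflush is where the client-side errors shown above ("Error Recovery
      // for ... datanode ... is bad") surface to the writer as an IOException.
      out.write("edit-2".getBytes(StandardCharsets.UTF_8));
      out.hflush();
    } catch (IOException e) {
      System.err.println("pipeline failure surfaced to the writer: " + e);
    }
  }
}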
2024-12-09T14:27:38,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18940221{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:38,510 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c6cb23c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:38,510 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:38,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2efee71d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:38,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ea97727{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:38,512 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:27:38,512 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T14:27:38,512 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-24484884-172.17.0.3-1733754442999 (Datanode Uuid a39989dc-c6cb-4f27-9945-581b0e757113) service to localhost/127.0.0.1:39131 2024-12-09T14:27:38,512 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:38,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data3/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:38,513 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data4/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:38,513 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:38,522 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:38,525 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:38,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:38,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:38,525 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:27:38,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21e6ff2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:38,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a4031cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:38,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:38,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:38,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b5c831e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir/jetty-localhost-38869-hadoop-hdfs-3_4_1-tests_jar-_-any-9642066934385503537/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:38,647 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55cf8f7d{HTTP/1.1, (http/1.1)}{localhost:38869} 2024-12-09T14:27:38,647 INFO [Time-limited test {}] server.Server(415): Started @166505ms 2024-12-09T14:27:38,648 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
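[Editor's note] The Jetty "Stopped ServerConnector ... / Started ServerConnector ..." entries around this point show the test stopping one mini-cluster datanode and bringing it back up, which is what breaks and then re-establishes the WAL write pipeline. The following is a hedged sketch of that step using the MiniDFSCluster test API as I understand it (Builder, stopDataNode, restartDataNode); it is an assumption-laden illustration, not the code of TestLogRolling#testLogRollOnPipelineRestart.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DataNodeRestartSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // same shape as the 2-datanode pipeline in this log
        .build();
    try {
      cluster.waitActive();

      // Stopping a datanode breaks any open write pipelines (the DataXceiver
      // and ResponseProcessor errors above); restarting it brings a fresh
      // datanode web endpoint back up, like the Stopped/Started Jetty lines here.
      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
      cluster.restartDataNode(dn);
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}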
2024-12-09T14:27:38,669 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:38,669 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:38,669 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:38,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:38126 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38126 dst: /127.0.0.1:33915 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:38,670 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:38132 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38132 dst: /127.0.0.1:33915 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:38,670 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764759968_22 at /127.0.0.1:38114 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38114 dst: /127.0.0.1:33915 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:38,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d9c9e99{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:38,679 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5493c38f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:38,679 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:38,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f0827f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:38,680 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@482b9b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:38,681 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:27:38,681 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-24484884-172.17.0.3-1733754442999 (Datanode Uuid faed1f47-158a-41f2-b774-1ed84e43e2b1) service to localhost/127.0.0.1:39131 2024-12-09T14:27:38,681 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T14:27:38,681 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:38,682 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data1/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:38,682 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data2/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:38,682 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:38,698 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:38,700 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:38,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:38,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:38,704 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:27:38,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d1373c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:38,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@358d2587{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:38,740 WARN [Thread-1332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:27:38,742 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6960965d7b447eea with lease ID 0x9276d1f5412f34bf: from storage DS-68ed176b-cc2f-4055-bdb2-972a31e67590 node DatanodeRegistration(127.0.0.1:33093, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=44395, infoSecurePort=0, ipcPort=34155, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:38,743 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6960965d7b447eea with lease ID 0x9276d1f5412f34bf: from storage DS-de1175f2-b1b7-4524-9740-cac2fe07ff60 node DatanodeRegistration(127.0.0.1:33093, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=44395, infoSecurePort=0, ipcPort=34155, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:38,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@419bc4ca{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir/jetty-localhost-34443-hadoop-hdfs-3_4_1-tests_jar-_-any-7279958937814637484/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:38,821 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e87d64e{HTTP/1.1, (http/1.1)}{localhost:34443} 2024-12-09T14:27:38,821 INFO [Time-limited test {}] server.Server(415): Started @166679ms 2024-12-09T14:27:38,822 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T14:27:38,943 WARN [Thread-1363 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:27:38,945 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe2d88c3b669de190 with lease ID 0x9276d1f5412f34c0: from storage DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e node DatanodeRegistration(127.0.0.1:46205, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=42789, infoSecurePort=0, ipcPort=43425, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:38,945 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe2d88c3b669de190 with lease ID 0x9276d1f5412f34c0: from storage DS-7af7881e-c735-44fc-b259-2b7dc24ee1ae node DatanodeRegistration(127.0.0.1:46205, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=42789, infoSecurePort=0, ipcPort=43425, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:39,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:39,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:39,872 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-09T14:27:39,875 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-09T14:27:39,877 ERROR [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:27:39,877 WARN [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:39,877 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C40335%2C1733754444125:(num 1733754445876) roll requested 2024-12-09T14:27:39,877 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:39,883 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 newFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:39,884 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:39,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:39,884 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:39,884 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:39,884 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:39,884 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:39,885 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:27:39,885 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:39,885 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:39,885 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44395:44395),(127.0.0.1/127.0.0.1:42789:42789)] 2024-12-09T14:27:39,885 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 is not closed yet, will try archiving it next time 2024-12-09T14:27:39,885 WARN [IPC Server handler 3 on default port 39131 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-12-09T14:27:39,885 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 after 0ms 2024-12-09T14:27:40,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:40,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:41,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:41,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:41,743 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T14:27:41,888 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-09T14:27:42,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:42,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:43,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:43,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:43,886 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 after 4001ms 2024-12-09T14:27:43,891 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:46205,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:43,892 WARN [DataStreamer for file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 block BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33093,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK], DatanodeInfoWithStorage[127.0.0.1:46205,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46205,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]) is bad. 
2024-12-09T14:27:43,892 WARN [PacketResponder: BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46205] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:43,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:51880 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51880 dst: /127.0.0.1:33093 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:43,892 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:42310 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46205:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42310 dst: /127.0.0.1:46205 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:43,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@419bc4ca{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:43,896 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e87d64e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:43,896 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:43,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@358d2587{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:43,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d1373c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:43,899 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T14:27:43,899 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:43,899 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:27:43,899 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-24484884-172.17.0.3-1733754442999 (Datanode Uuid faed1f47-158a-41f2-b774-1ed84e43e2b1) service to localhost/127.0.0.1:39131 2024-12-09T14:27:43,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data1/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:43,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data2/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:43,900 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:43,909 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:43,911 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:43,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:43,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:43,913 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:27:43,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35808fda{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:43,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca8564b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:44,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24369946{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir/jetty-localhost-34015-hadoop-hdfs-3_4_1-tests_jar-_-any-12597695288666485717/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:44,049 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@290ac13e{HTTP/1.1, (http/1.1)}{localhost:34015} 2024-12-09T14:27:44,049 INFO [Time-limited test {}] server.Server(415): Started @171907ms 2024-12-09T14:27:44,052 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:27:44,123 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:44,127 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1467366520_22 at /127.0.0.1:51902 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51902 dst: /127.0.0.1:33093 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:44,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b5c831e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:44,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55cf8f7d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:44,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:44,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a4031cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:44,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21e6ff2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:44,158 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:27:44,158 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:27:44,158 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-24484884-172.17.0.3-1733754442999 (Datanode Uuid a39989dc-c6cb-4f27-9945-581b0e757113) service to localhost/127.0.0.1:39131 2024-12-09T14:27:44,158 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:44,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data3/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:44,164 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data4/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:44,164 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:44,181 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:44,184 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:44,190 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:44,190 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:44,190 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:27:44,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e29ac6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:44,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@528eeea6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:44,216 WARN [Thread-1406 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:27:44,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7521a8363ff22b79 with lease ID 0x9276d1f5412f34c1: from storage DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e node DatanodeRegistration(127.0.0.1:41145, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=38955, infoSecurePort=0, ipcPort=33753, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:44,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7521a8363ff22b79 with lease ID 0x9276d1f5412f34c1: from storage DS-7af7881e-c735-44fc-b259-2b7dc24ee1ae node DatanodeRegistration(127.0.0.1:41145, datanodeUuid=faed1f47-158a-41f2-b774-1ed84e43e2b1, infoPort=38955, infoSecurePort=0, ipcPort=33753, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:44,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e608499{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/java.io.tmpdir/jetty-localhost-43289-hadoop-hdfs-3_4_1-tests_jar-_-any-17414397153910266474/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:44,339 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3172a7c6{HTTP/1.1, (http/1.1)}{localhost:43289} 2024-12-09T14:27:44,339 INFO [Time-limited test {}] server.Server(415): Started @172197ms 2024-12-09T14:27:44,340 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:27:44,459 WARN [Thread-1437 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:27:44,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfd9e5dd57122169 with lease ID 0x9276d1f5412f34c2: from storage DS-68ed176b-cc2f-4055-bdb2-972a31e67590 node DatanodeRegistration(127.0.0.1:42887, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=38947, infoSecurePort=0, ipcPort=34755, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:44,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfd9e5dd57122169 with lease ID 0x9276d1f5412f34c2: from storage DS-de1175f2-b1b7-4524-9740-cac2fe07ff60 node DatanodeRegistration(127.0.0.1:42887, datanodeUuid=a39989dc-c6cb-4f27-9945-581b0e757113, infoPort=38947, infoSecurePort=0, ipcPort=34755, storageInfo=lv=-57;cid=testClusterID;nsid=2103014982;c=1733754442999), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:27:44,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:44,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:45,369 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-09T14:27:45,372 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-09T14:27:45,373 ERROR [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33093,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
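
[Editor's note, sketch only] The appendAndSync failure above ("All datanodes [DatanodeInfoWithStorage[127.0.0.1:33093,...]] are bad. Aborting...") is the DFS client's write-pipeline recovery path giving up after the datanode carrying the WAL block was restarted. Whether the client aborts or first tries to swap in a replacement datanode is governed by the stock HDFS client settings below. This is a minimal illustrative sketch; the values shown are examples and are not the configuration used by the test logged here.

    import org.apache.hadoop.conf.Configuration;

    // Illustrative sketch: stock HDFS client keys that govern write-pipeline recovery.
    // Example values only; not the settings used in the run logged above.
    public final class PipelinePolicySketch {
      public static Configuration pipelineFriendlyConf() {
        Configuration conf = new Configuration();
        // Allow the client to look for a replacement datanode when one in the pipeline fails.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // Stock policies are NEVER, DEFAULT and ALWAYS.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // best-effort lets the write continue when a replacement cannot be found,
        // instead of failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }
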
2024-12-09T14:27:45,374 WARN [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33093,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:45,374 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C40335%2C1733754444125:(num 1733754459877) roll requested 2024-12-09T14:27:45,374 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 2024-12-09T14:27:45,387 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 newFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 2024-12-09T14:27:45,387 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:45,387 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:45,387 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:45,389 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:45,389 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:45,390 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 2024-12-09T14:27:45,390 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33093,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
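
[Editor's note, sketch only] The records above show the region server's log roller reacting to the append failure: a roll is requested, a new writer is opened on a fresh pipeline, and the trailer write on the old writer fails with the same "All datanodes ... are bad" error before the close falls back to lease recovery in the lines that follow. For reference, a client can request the same WAL roll explicitly through the public Admin API; a minimal sketch, reusing the "host,port,startcode" server-name form from this log purely as an illustrative value:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch only: the server name below is illustrative, in the same
    // "host,port,startcode" form used throughout this log.
    public final class RollWalSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ServerName rs = ServerName.valueOf("f4e784dc7cb5,40335,1733754444125");
          // Ask the region server to close its current WAL and start a new one,
          // which is what the logRoller thread does internally in the records above.
          admin.rollWALWriter(rs);
        }
      }
    }
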
2024-12-09T14:27:45,390 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33093,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:45,390 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:45,391 WARN [IPC Server handler 2 on default port 39131 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-09T14:27:45,391 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 after 1ms 2024-12-09T14:27:45,399 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38955:38955),(127.0.0.1/127.0.0.1:38947:38947)] 2024-12-09T14:27:45,399 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 is not closed yet, will try archiving it next time 2024-12-09T14:27:45,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:45,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:46,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:46,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:47,401 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:47,407 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 newFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:47,407 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:47,407 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:47,408 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:47,408 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:47,408 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:47,408 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:47,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741838_1019 (size=1264) 2024-12-09T14:27:47,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741838_1019 (size=1264) 2024-12-09T14:27:47,410 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38955:38955),(127.0.0.1/127.0.0.1:38947:38947)] 2024-12-09T14:27:47,411 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 is not closed yet, will try archiving it next time 2024-12-09T14:27:47,411 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): 
hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 is not closed yet, will try archiving it next time 2024-12-09T14:27:47,411 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:47,411 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:47,411 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 after 0ms 2024-12-09T14:27:47,411 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:47,420 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733754446800/Put/vlen=218/seqid=0] 2024-12-09T14:27:47,420 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733754456501/Put/vlen=1045/seqid=0] 2024-12-09T14:27:47,420 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754445876 2024-12-09T14:27:47,420 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:47,420 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:47,421 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 after 0ms 2024-12-09T14:27:47,421 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:47,423 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733754459876/Put/vlen=1045/seqid=0] 2024-12-09T14:27:47,424 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733754461889/Put/vlen=1045/seqid=0] 2024-12-09T14:27:47,424 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 2024-12-09T14:27:47,424 DEBUG [Time-limited test {}] wal.TestLogRolling(403): 
recovering lease for hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 2024-12-09T14:27:47,424 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 2024-12-09T14:27:47,424 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 after 0ms 2024-12-09T14:27:47,424 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754465374 2024-12-09T14:27:47,427 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733754465373/Put/vlen=1045/seqid=0] 2024-12-09T14:27:47,427 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:47,427 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:47,428 WARN [IPC Server handler 4 on default port 39131 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-09T14:27:47,428 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 after 1ms 2024-12-09T14:27:47,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:47,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:48,219 WARN [ResponseProcessor for block BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:48,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764759968_22 at /127.0.0.1:39822 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41145:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39822 dst: /127.0.0.1:41145 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41145 remote=/127.0.0.1:39822]. Total timeout mills is 60000, 59187 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:27:48,219 WARN [DataStreamer for file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 block BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41145,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK], DatanodeInfoWithStorage[127.0.0.1:42887,DS-68ed176b-cc2f-4055-bdb2-972a31e67590,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41145,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]) is bad. 2024-12-09T14:27:48,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-764759968_22 at /127.0.0.1:46964 [Receiving block BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42887:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46964 dst: /127.0.0.1:42887 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T14:27:48,220 WARN [DataStreamer for file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 block BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
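
[Editor's note, sketch only] Throughout this stretch the Close-WAL-Writer threads keep retrying lease recovery: attempt=0 reports "Lease recovery is in progress", and the recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" traces are the isFileClosed poll running against a filesystem client that has already been closed (the hdfs://localhost:43639 paths). Below is a stripped-down sketch of the recover-then-poll pattern that RecoverLeaseFSUtils wraps, using the DistributedFileSystem calls visible in the stack traces; the real utility adds timeouts, pauses, and reflection, and the poll interval here is illustrative only.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Simplified sketch of the recover-then-poll pattern; not the actual HBase utility.
    public final class LeaseRecoverySketch {
      public static void recover(DistributedFileSystem dfs, Path walFile) throws Exception {
        // Ask the NameNode to start lease recovery; true means the file is already closed.
        boolean closed = dfs.recoverLease(walFile);
        while (!closed) {
          // While the NameNode reports "Lease recovery is in progress" the file stays open;
          // poll until it reports the file closed.
          Thread.sleep(1000L); // illustrative poll interval
          closed = dfs.isFileClosed(walFile);
        }
      }
    }
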
2024-12-09T14:27:48,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741839_1022 (size=85) 2024-12-09T14:27:48,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741839_1022 (size=85) 2024-12-09T14:27:48,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:48,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:49,392 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754459877 after 4002ms 2024-12-09T14:27:49,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:49,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:50,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:50,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:51,218 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T14:27:51,428 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 after 4001ms 2024-12-09T14:27:51,429 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:51,432 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:51,432 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-09T14:27:51,433 ERROR [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:51,433 WARN [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:51,433 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C40335%2C1733754444125.meta:.meta(num 1733754446230) roll requested 2024-12-09T14:27:51,433 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754471433.meta 2024-12-09T14:27:51,444 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,444 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,444 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,445 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,445 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754471433.meta 2024-12-09T14:27:51,445 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:51,445 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:51,445 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta 2024-12-09T14:27:51,446 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38947:38947),(127.0.0.1/127.0.0.1:38955:38955)] 2024-12-09T14:27:51,446 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta is not closed yet, will try archiving it next time 2024-12-09T14:27:51,446 WARN [IPC Server handler 3 on default port 39131 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1024 for block blk_1073741834_1015 2024-12-09T14:27:51,446 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta after 1ms 2024-12-09T14:27:51,461 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/info/9af3a44a7bb44da68d2077ba0079c55b is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4./info:regioninfo/1733754446808/Put/seqid=0 2024-12-09T14:27:51,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741841_1025 (size=7125) 2024-12-09T14:27:51,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741841_1025 (size=7125) 2024-12-09T14:27:51,467 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/info/9af3a44a7bb44da68d2077ba0079c55b 2024-12-09T14:27:51,487 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/ns/d87fedc3e0bc46db9ab05d43324355a1 is 43, key is default/ns:d/1733754446310/Put/seqid=0 2024-12-09T14:27:51,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741842_1026 (size=5153) 2024-12-09T14:27:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741842_1026 (size=5153) 2024-12-09T14:27:51,492 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/ns/d87fedc3e0bc46db9ab05d43324355a1 2024-12-09T14:27:51,510 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/table/17edfe8910184ed2ae74080c794ebb5f is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733754446822/Put/seqid=0 2024-12-09T14:27:51,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741843_1027 (size=5438) 2024-12-09T14:27:51,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741843_1027 (size=5438) 2024-12-09T14:27:51,516 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/table/17edfe8910184ed2ae74080c794ebb5f 2024-12-09T14:27:51,521 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/info/9af3a44a7bb44da68d2077ba0079c55b as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/info/9af3a44a7bb44da68d2077ba0079c55b 2024-12-09T14:27:51,525 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/info/9af3a44a7bb44da68d2077ba0079c55b, entries=10, sequenceid=11, filesize=7.0 K 2024-12-09T14:27:51,526 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/ns/d87fedc3e0bc46db9ab05d43324355a1 as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/ns/d87fedc3e0bc46db9ab05d43324355a1 2024-12-09T14:27:51,531 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/ns/d87fedc3e0bc46db9ab05d43324355a1, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T14:27:51,531 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/.tmp/table/17edfe8910184ed2ae74080c794ebb5f as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/table/17edfe8910184ed2ae74080c794ebb5f 2024-12-09T14:27:51,536 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/table/17edfe8910184ed2ae74080c794ebb5f, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T14:27:51,537 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=11, compaction requested=false 2024-12-09T14:27:51,537 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T14:27:51,537 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e31851bebb57e7b89e7d932d84ee55a4 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-09T14:27:51,537 ERROR [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:27:51,538 WARN [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a-prefix:f4e784dc7cb5,40335,1733754444125 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
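After the append failure above, the logRoller thread in the entries that follow requests a WAL roll, opens a new writer, and archives the old file. For context, the same roll can also be requested explicitly through the public Admin API; the sketch below is a minimal, hedged client-side example (the class name RollWalExample is made up, and this code is not part of the test shown in this log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask each region server to roll its WAL, the same operation the
      // logRoller thread performs automatically after the append failure above.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}
```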
2024-12-09T14:27:51,538 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C40335%2C1733754444125:(num 1733754467400) roll requested 2024-12-09T14:27:51,538 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C40335%2C1733754444125.1733754471538 2024-12-09T14:27:51,542 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 newFile=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754471538 2024-12-09T14:27:51,543 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,543 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,543 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,543 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,543 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,543 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754471538 2024-12-09T14:27:51,543 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:51,544 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-24484884-172.17.0.3-1733754442999:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:51,544 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:51,547 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38947:38947),(127.0.0.1/127.0.0.1:38955:38955)] 2024-12-09T14:27:51,547 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 after 2ms 2024-12-09T14:27:51,547 DEBUG [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 is not closed yet, will try archiving it next time 2024-12-09T14:27:51,547 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 to hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/oldWALs/f4e784dc7cb5%2C40335%2C1733754444125.1733754467400 2024-12-09T14:27:51,563 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4/.tmp/info/fbf638b7a0ef453c982bad267a9ed88f is 1080, key is row1002/info:/1733754456501/Put/seqid=0 2024-12-09T14:27:51,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741845_1029 (size=9270) 2024-12-09T14:27:51,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741845_1029 (size=9270) 2024-12-09T14:27:51,568 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4/.tmp/info/fbf638b7a0ef453c982bad267a9ed88f 2024-12-09T14:27:51,574 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4/.tmp/info/fbf638b7a0ef453c982bad267a9ed88f as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4/info/fbf638b7a0ef453c982bad267a9ed88f 2024-12-09T14:27:51,580 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4/info/fbf638b7a0ef453c982bad267a9ed88f, entries=4, sequenceid=8, filesize=9.1 K 2024-12-09T14:27:51,581 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for e31851bebb57e7b89e7d932d84ee55a4 in 44ms, sequenceid=8, compaction requested=false 2024-12-09T14:27:51,581 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e31851bebb57e7b89e7d932d84ee55a4: 2024-12-09T14:27:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T14:27:51,587 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T14:27:51,587 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) 
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:27:51,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:51,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:51,587 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T14:27:51,587 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T14:27:51,587 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=653564868, stopped=false 2024-12-09T14:27:51,587 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f4e784dc7cb5,39993,1733754444047 2024-12-09T14:27:51,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:27:51,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:51,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:27:51,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:51,591 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:27:51,591 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:27:51,591 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-12-09T14:27:51,591 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T14:27:51,592 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-12-09T14:27:51,592 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:51,592 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,40335,1733754444125' ***** 2024-12-09T14:27:51,592 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:27:51,597 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:27:51,597 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:27:51,597 INFO [RS:0;f4e784dc7cb5:40335 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:27:51,597 INFO [RS:0;f4e784dc7cb5:40335 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:27:51,597 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(3091): Received CLOSE for e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:51,598 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:51,598 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:27:51,598 INFO [RS:0;f4e784dc7cb5:40335 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f4e784dc7cb5:40335. 2024-12-09T14:27:51,598 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e31851bebb57e7b89e7d932d84ee55a4, disabling compactions & flushes 2024-12-09T14:27:51,598 DEBUG [RS:0;f4e784dc7cb5:40335 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:27:51,598 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 
2024-12-09T14:27:51,598 DEBUG [RS:0;f4e784dc7cb5:40335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:51,598 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:51,598 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. after waiting 0 ms 2024-12-09T14:27:51,598 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:51,598 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T14:27:51,598 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:27:51,598 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T14:27:51,598 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T14:27:51,600 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T14:27:51,600 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, e31851bebb57e7b89e7d932d84ee55a4=TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4.} 2024-12-09T14:27:51,600 DEBUG [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e31851bebb57e7b89e7d932d84ee55a4 2024-12-09T14:27:51,600 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:27:51,600 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:27:51,601 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:27:51,601 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:27:51,601 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:27:51,605 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/default/TestLogRolling-testLogRollOnPipelineRestart/e31851bebb57e7b89e7d932d84ee55a4/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-09T14:27:51,606 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 
2024-12-09T14:27:51,606 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e31851bebb57e7b89e7d932d84ee55a4: Waiting for close lock at 1733754471598Running coprocessor pre-close hooks at 1733754471598Disabling compacts and flushes for region at 1733754471598Disabling writes for close at 1733754471598Writing region close event to WAL at 1733754471601 (+3 ms)Running coprocessor post-close hooks at 1733754471606 (+5 ms)Closed at 1733754471606 2024-12-09T14:27:51,606 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733754446396.e31851bebb57e7b89e7d932d84ee55a4. 2024-12-09T14:27:51,607 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T14:27:51,607 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:27:51,607 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:27:51,608 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754471600Running coprocessor pre-close hooks at 1733754471600Disabling compacts and flushes for region at 1733754471600Disabling writes for close at 1733754471601 (+1 ms)Writing region close event to WAL at 1733754471602 (+1 ms)Running coprocessor post-close hooks at 1733754471607 (+5 ms)Closed at 1733754471607 2024-12-09T14:27:51,608 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T14:27:51,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:51,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:51,736 INFO [regionserver/f4e784dc7cb5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:27:51,776 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T14:27:51,776 INFO [regionserver/f4e784dc7cb5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T14:27:51,801 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,40335,1733754444125; all regions closed. 2024-12-09T14:27:51,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,801 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,801 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,801 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:51,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741840_1023 (size=825) 2024-12-09T14:27:51,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741840_1023 (size=825) 2024-12-09T14:27:52,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:52,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:53,462 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T14:27:53,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:53,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:54,026 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T14:27:54,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:54,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:55,447 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta after 4002ms 2024-12-09T14:27:55,447 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/WALs/f4e784dc7cb5,40335,1733754444125/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta to hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/oldWALs/f4e784dc7cb5%2C40335%2C1733754444125.meta.1733754446230.meta 2024-12-09T14:27:55,450 DEBUG [RS:0;f4e784dc7cb5:40335 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/oldWALs 2024-12-09T14:27:55,450 INFO [RS:0;f4e784dc7cb5:40335 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C40335%2C1733754444125.meta:.meta(num 1733754471433) 2024-12-09T14:27:55,451 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,451 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,451 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,451 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,451 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741844_1028 (size=1162) 2024-12-09T14:27:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741844_1028 (size=1162) 2024-12-09T14:27:55,457 DEBUG [RS:0;f4e784dc7cb5:40335 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/oldWALs 2024-12-09T14:27:55,457 INFO [RS:0;f4e784dc7cb5:40335 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C40335%2C1733754444125:(num 1733754471538) 2024-12-09T14:27:55,457 DEBUG [RS:0;f4e784dc7cb5:40335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:27:55,457 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:27:55,457 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:27:55,457 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T14:27:55,457 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:27:55,457 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T14:27:55,458 INFO [RS:0;f4e784dc7cb5:40335 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40335 2024-12-09T14:27:55,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,40335,1733754444125 2024-12-09T14:27:55,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:27:55,459 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:27:55,461 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,40335,1733754444125] 2024-12-09T14:27:55,462 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,40335,1733754444125 already deleted, retry=false 2024-12-09T14:27:55,462 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,40335,1733754444125 expired; onlineServers=0 2024-12-09T14:27:55,462 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f4e784dc7cb5,39993,1733754444047' ***** 2024-12-09T14:27:55,462 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T14:27:55,462 INFO [M:0;f4e784dc7cb5:39993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:27:55,462 INFO [M:0;f4e784dc7cb5:39993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:27:55,462 DEBUG [M:0;f4e784dc7cb5:39993 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T14:27:55,462 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T14:27:55,462 DEBUG [M:0;f4e784dc7cb5:39993 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T14:27:55,462 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754445566 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754445566,5,FailOnTimeoutGroup] 2024-12-09T14:27:55,462 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754445564 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754445564,5,FailOnTimeoutGroup] 2024-12-09T14:27:55,463 INFO [M:0;f4e784dc7cb5:39993 {}] hbase.ChoreService(370): Chore service for: master/f4e784dc7cb5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T14:27:55,463 INFO [M:0;f4e784dc7cb5:39993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:27:55,463 DEBUG [M:0;f4e784dc7cb5:39993 {}] master.HMaster(1795): Stopping service threads 2024-12-09T14:27:55,463 INFO [M:0;f4e784dc7cb5:39993 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T14:27:55,463 INFO [M:0;f4e784dc7cb5:39993 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:27:55,463 INFO [M:0;f4e784dc7cb5:39993 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T14:27:55,463 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T14:27:55,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T14:27:55,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:27:55,465 DEBUG [M:0;f4e784dc7cb5:39993 {}] zookeeper.ZKUtil(347): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T14:27:55,465 WARN [M:0;f4e784dc7cb5:39993 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T14:27:55,465 INFO [M:0;f4e784dc7cb5:39993 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/.lastflushedseqids 2024-12-09T14:27:55,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741846_1030 (size=139) 2024-12-09T14:27:55,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741846_1030 (size=139) 2024-12-09T14:27:55,471 INFO [M:0;f4e784dc7cb5:39993 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T14:27:55,471 INFO [M:0;f4e784dc7cb5:39993 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T14:27:55,471 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:27:55,471 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:55,471 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:55,471 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:27:55,472 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:27:55,472 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-12-09T14:27:55,472 ERROR [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData-prefix:f4e784dc7cb5,39993,1733754444047 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:55,472 WARN [FSHLog-0-hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData-prefix:f4e784dc7cb5,39993,1733754444047 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T14:27:55,472 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog f4e784dc7cb5%2C39993%2C1733754444047:(num 1733754445462) roll requested 2024-12-09T14:27:55,472 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C39993%2C1733754444047.1733754475472 2024-12-09T14:27:55,480 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,480 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,480 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,480 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,481 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,481 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754475472 2024-12-09T14:27:55,481 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:55,481 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33915,DS-ed69cfe7-a5af-44fe-8559-79bc8305fb2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T14:27:55,481 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 2024-12-09T14:27:55,482 WARN [IPC Server handler 2 on default port 39131 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1014 2024-12-09T14:27:55,482 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 after 1ms 2024-12-09T14:27:55,486 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38955:38955),(127.0.0.1/127.0.0.1:38947:38947)] 2024-12-09T14:27:55,486 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 is not closed yet, will try archiving it next time 2024-12-09T14:27:55,503 DEBUG [M:0;f4e784dc7cb5:39993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a773529c4f044b1ab8fe8146a8fe297c is 82, key is hbase:meta,,1/info:regioninfo/1733754446292/Put/seqid=0 2024-12-09T14:27:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741848_1033 (size=5672) 2024-12-09T14:27:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741848_1033 (size=5672) 2024-12-09T14:27:55,510 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a773529c4f044b1ab8fe8146a8fe297c 2024-12-09T14:27:55,530 DEBUG [M:0;f4e784dc7cb5:39993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5da079df9dd44bfba292ef5aa02dc7e1 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733754446828/Put/seqid=0 2024-12-09T14:27:55,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741849_1034 (size=6119) 2024-12-09T14:27:55,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741849_1034 (size=6119) 2024-12-09T14:27:55,535 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5da079df9dd44bfba292ef5aa02dc7e1 2024-12-09T14:27:55,554 DEBUG [M:0;f4e784dc7cb5:39993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5ca425b2b93a4e33abed5b289be9c30d is 69, key is f4e784dc7cb5,40335,1733754444125/rs:state/1733754445696/Put/seqid=0 2024-12-09T14:27:55,558 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741850_1035 (size=5156) 2024-12-09T14:27:55,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741850_1035 (size=5156) 2024-12-09T14:27:55,559 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5ca425b2b93a4e33abed5b289be9c30d 2024-12-09T14:27:55,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:55,561 INFO [RS:0;f4e784dc7cb5:40335 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:27:55,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40335-0x1012b94e91d0001, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:55,561 INFO [RS:0;f4e784dc7cb5:40335 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,40335,1733754444125; zookeeper connection closed. 2024-12-09T14:27:55,561 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e19ad15 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e19ad15 2024-12-09T14:27:55,561 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T14:27:55,578 DEBUG [M:0;f4e784dc7cb5:39993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a1ded84757fc44cf8889d461689226c8 is 52, key is load_balancer_on/state:d/1733754446389/Put/seqid=0 2024-12-09T14:27:55,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741851_1036 (size=5056) 2024-12-09T14:27:55,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741851_1036 (size=5056) 2024-12-09T14:27:55,589 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a1ded84757fc44cf8889d461689226c8 2024-12-09T14:27:55,596 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a773529c4f044b1ab8fe8146a8fe297c as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a773529c4f044b1ab8fe8146a8fe297c 2024-12-09T14:27:55,601 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a773529c4f044b1ab8fe8146a8fe297c, entries=8, sequenceid=56, filesize=5.5 K 2024-12-09T14:27:55,602 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5da079df9dd44bfba292ef5aa02dc7e1 as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5da079df9dd44bfba292ef5aa02dc7e1 2024-12-09T14:27:55,608 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5da079df9dd44bfba292ef5aa02dc7e1, entries=6, sequenceid=56, filesize=6.0 K 2024-12-09T14:27:55,609 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5ca425b2b93a4e33abed5b289be9c30d as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5ca425b2b93a4e33abed5b289be9c30d 2024-12-09T14:27:55,614 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5ca425b2b93a4e33abed5b289be9c30d, entries=1, sequenceid=56, filesize=5.0 K 2024-12-09T14:27:55,615 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a1ded84757fc44cf8889d461689226c8 as hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a1ded84757fc44cf8889d461689226c8 2024-12-09T14:27:55,620 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a1ded84757fc44cf8889d461689226c8, entries=1, sequenceid=56, filesize=4.9 K 2024-12-09T14:27:55,622 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=56, compaction requested=false 2024-12-09T14:27:55,623 INFO [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T14:27:55,624 DEBUG [M:0;f4e784dc7cb5:39993 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754475471Disabling compacts and flushes for region at 1733754475471Disabling writes for close at 1733754475471Obtaining lock to block concurrent updates at 1733754475472 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733754475472Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733754475472Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733754475487 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733754475487Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733754475503 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733754475503Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733754475515 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733754475530 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733754475530Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733754475540 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733754475553 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733754475553Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733754475563 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733754475577 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733754475577Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12439d64: reopening flushed file at 1733754475596 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37debeff: reopening flushed file at 1733754475601 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42907368: reopening flushed file at 1733754475608 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23a4dc21: reopening flushed file at 1733754475614 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=56, compaction requested=false at 1733754475622 (+8 ms)Writing region close event to WAL at 1733754475623 (+1 ms)Closed at 1733754475623 2024-12-09T14:27:55,624 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,624 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,624 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:27:55,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42887 is added to blk_1073741847_1031 (size=757) 2024-12-09T14:27:55,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:55,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41145 is added to blk_1073741847_1031 (size=757) 2024-12-09T14:27:55,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:56,462 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1014: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T14:27:56,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:56,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:56,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:57,130 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:27:57,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:27:57,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:57,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:57,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:27:57,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T14:27:57,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T14:27:57,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T14:27:58,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:58,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:59,483 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 after 4002ms 2024-12-09T14:27:59,483 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/WALs/f4e784dc7cb5,39993,1733754444047/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 to hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/oldWALs/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 2024-12-09T14:27:59,487 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/MasterData/oldWALs/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462 to hdfs://localhost:39131/user/jenkins/test-data/e9f93771-235e-f43d-6c73-2c7b4942f76a/oldWALs/f4e784dc7cb5%2C39993%2C1733754444047.1733754445462$masterlocalwal$ 2024-12-09T14:27:59,487 INFO [M:0;f4e784dc7cb5:39993 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T14:27:59,487 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T14:27:59,487 INFO [M:0;f4e784dc7cb5:39993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39993 2024-12-09T14:27:59,487 INFO [M:0;f4e784dc7cb5:39993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:27:59,589 INFO [M:0;f4e784dc7cb5:39993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:27:59,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:59,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39993-0x1012b94e91d0000, quorum=127.0.0.1:52626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:27:59,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e608499{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:59,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3172a7c6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:59,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:59,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@528eeea6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:59,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e29ac6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:59,595 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-12-09T14:27:59,595 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T14:27:59,595 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-24484884-172.17.0.3-1733754442999 (Datanode Uuid a39989dc-c6cb-4f27-9945-581b0e757113) service to localhost/127.0.0.1:39131 2024-12-09T14:27:59,595 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:59,597 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:59,597 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data4/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:59,597 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data3/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:59,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24369946{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:27:59,601 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@290ac13e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:59,601 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:59,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca8564b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:59,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35808fda{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:59,603 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:27:59,603 WARN [BP-24484884-172.17.0.3-1733754442999 heartbeating to localhost/127.0.0.1:39131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-24484884-172.17.0.3-1733754442999 (Datanode Uuid faed1f47-158a-41f2-b774-1ed84e43e2b1) service to localhost/127.0.0.1:39131 2024-12-09T14:27:59,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data1/current/BP-24484884-172.17.0.3-1733754442999 
{}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:59,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/cluster_22f41878-c7c4-8c77-8579-49410f18bd2b/data/data2/current/BP-24484884-172.17.0.3-1733754442999 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:27:59,604 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T14:27:59,604 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:27:59,604 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:27:59,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49c2791f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:27:59,614 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65e5bfc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:27:59,614 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:27:59,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73ee6be8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:27:59,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@594180cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir/,STOPPED} 2024-12-09T14:27:59,625 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T14:27:59,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:27:59,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:27:59,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T14:27:59,660 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:39131 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39131 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39131 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39131 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39131 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=225 (was 147) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5686 (was 5984) 2024-12-09T14:27:59,669 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=225, ProcessCount=11, AvailableMemoryMB=5685 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.log.dir so I do NOT create it in target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/87063ab0-1394-5681-6d28-e05af9f44c1b/hadoop.tmp.dir so I do NOT create it in target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19, deleteOnExit=true 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/test.cache.data in system properties and HBase conf 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir in system properties and HBase conf 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T14:27:59,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T14:27:59,671 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/nfs.dump.dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/java.io.tmpdir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T14:27:59,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T14:27:59,686 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:27:59,772 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:59,776 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:27:59,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:27:59,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:27:59,777 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:27:59,778 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:27:59,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@252e2abb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:27:59,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@521d1c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:27:59,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12208e1b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/java.io.tmpdir/jetty-localhost-42745-hadoop-hdfs-3_4_1-tests_jar-_-any-9991162886975898353/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:27:59,911 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56e526c{HTTP/1.1, (http/1.1)}{localhost:42745} 2024-12-09T14:27:59,911 INFO [Time-limited test {}] server.Server(415): Started @187769ms 2024-12-09T14:27:59,929 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:28:00,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:28:00,004 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:28:00,007 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:28:00,007 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:28:00,007 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:28:00,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48bfafbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:28:00,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45ae7776{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:28:00,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ac76b28{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/java.io.tmpdir/jetty-localhost-40479-hadoop-hdfs-3_4_1-tests_jar-_-any-5950380662488320762/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:28:00,132 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@371e2711{HTTP/1.1, (http/1.1)}{localhost:40479} 2024-12-09T14:28:00,132 INFO [Time-limited test {}] server.Server(415): Started @187990ms 2024-12-09T14:28:00,133 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:28:00,170 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:28:00,174 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:28:00,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:28:00,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:28:00,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:28:00,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fc981fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:28:00,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@739c2ff2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:28:00,251 WARN [Thread-1632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data2/current/BP-1705338099-172.17.0.3-1733754479705/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:00,251 WARN [Thread-1631 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data1/current/BP-1705338099-172.17.0.3-1733754479705/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:00,276 WARN [Thread-1610 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:28:00,280 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68c8f1336a354d05 with lease ID 0x3a0da4059cdd9a0: Processing first storage report for DS-acf978a6-e36a-4ce4-98cf-b2d1f91b97f9 from datanode DatanodeRegistration(127.0.0.1:39499, datanodeUuid=891e3663-3bb8-4529-9689-0cc80f24421c, infoPort=44705, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705) 2024-12-09T14:28:00,280 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68c8f1336a354d05 with lease ID 0x3a0da4059cdd9a0: from storage DS-acf978a6-e36a-4ce4-98cf-b2d1f91b97f9 node DatanodeRegistration(127.0.0.1:39499, datanodeUuid=891e3663-3bb8-4529-9689-0cc80f24421c, infoPort=44705, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:00,280 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68c8f1336a354d05 with lease ID 0x3a0da4059cdd9a0: Processing first storage report for DS-fcecea14-1ef0-45f7-89fe-9835dc8874c5 from datanode DatanodeRegistration(127.0.0.1:39499, datanodeUuid=891e3663-3bb8-4529-9689-0cc80f24421c, infoPort=44705, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705) 2024-12-09T14:28:00,280 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68c8f1336a354d05 with lease ID 0x3a0da4059cdd9a0: from storage DS-fcecea14-1ef0-45f7-89fe-9835dc8874c5 node DatanodeRegistration(127.0.0.1:39499, datanodeUuid=891e3663-3bb8-4529-9689-0cc80f24421c, infoPort=44705, infoSecurePort=0, ipcPort=46059, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:00,324 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b8edabe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/java.io.tmpdir/jetty-localhost-39999-hadoop-hdfs-3_4_1-tests_jar-_-any-8729557926195446889/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:28:00,325 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3838d9cb{HTTP/1.1, (http/1.1)}{localhost:39999} 2024-12-09T14:28:00,325 INFO [Time-limited test {}] server.Server(415): Started @188182ms 2024-12-09T14:28:00,326 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
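The datanode and block-report lines above complete the DFS side of the minicluster that HBaseTestingUtil(805) started with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A minimal sketch of bringing up an equivalent cluster through that test API follows; it assumes the HBase 3.x classes named in the log (HBaseTestingUtil, StartMiniClusterOption), and the builder method names are assumptions rather than values taken from this log.

    // Sketch only: start a minicluster matching the options logged above
    // (1 master, 1 region server, 2 data nodes, 1 ZK server). Builder method
    // names are assumptions; this is not the TestLogRolling test itself.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)          // one HMaster, as in the log
            .numRegionServers(1)    // one MiniHBaseClusterRegionServer
            .numDataNodes(2)        // two HDFS datanodes report blocks above
            .numZkServers(1)        // one MiniZooKeeperCluster member
            .build();
        util.startMiniCluster(option);   // brings up DFS, ZK, master and region server
        try {
          // ... test logic against the running minicluster ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" log line
        }
      }
    }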
2024-12-09T14:28:00,447 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data4/current/BP-1705338099-172.17.0.3-1733754479705/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:00,447 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data3/current/BP-1705338099-172.17.0.3-1733754479705/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:00,469 WARN [Thread-1646 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:28:00,471 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6a88959be9c1b62 with lease ID 0x3a0da4059cdd9a1: Processing first storage report for DS-e4a3f8cc-c22a-40de-80bd-20571b5b56ec from datanode DatanodeRegistration(127.0.0.1:46833, datanodeUuid=1630dcff-49cc-4890-a289-40a2429bd4aa, infoPort=35885, infoSecurePort=0, ipcPort=43317, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705) 2024-12-09T14:28:00,471 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6a88959be9c1b62 with lease ID 0x3a0da4059cdd9a1: from storage DS-e4a3f8cc-c22a-40de-80bd-20571b5b56ec node DatanodeRegistration(127.0.0.1:46833, datanodeUuid=1630dcff-49cc-4890-a289-40a2429bd4aa, infoPort=35885, infoSecurePort=0, ipcPort=43317, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:00,471 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6a88959be9c1b62 with lease ID 0x3a0da4059cdd9a1: Processing first storage report for DS-39d5e817-b724-4d41-8568-86f729f2dd6c from datanode DatanodeRegistration(127.0.0.1:46833, datanodeUuid=1630dcff-49cc-4890-a289-40a2429bd4aa, infoPort=35885, infoSecurePort=0, ipcPort=43317, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705) 2024-12-09T14:28:00,471 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6a88959be9c1b62 with lease ID 0x3a0da4059cdd9a1: from storage DS-39d5e817-b724-4d41-8568-86f729f2dd6c node DatanodeRegistration(127.0.0.1:46833, datanodeUuid=1630dcff-49cc-4890-a289-40a2429bd4aa, infoPort=35885, infoSecurePort=0, ipcPort=43317, storageInfo=lv=-57;cid=testClusterID;nsid=742258848;c=1733754479705), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:00,557 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c 2024-12-09T14:28:00,560 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/zookeeper_0, clientPort=59972, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T14:28:00,561 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59972 2024-12-09T14:28:00,561 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:00,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:00,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:28:00,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:28:00,572 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460 with version=8 2024-12-09T14:28:00,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase-staging 2024-12-09T14:28:00,574 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:28:00,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:00,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:00,574 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:28:00,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:00,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:28:00,574 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T14:28:00,574 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:28:00,575 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37717 2024-12-09T14:28:00,576 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37717 connecting to ZooKeeper ensemble=127.0.0.1:59972 2024-12-09T14:28:00,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:377170x0, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:28:00,584 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37717-0x1012b9577cf0000 connected 2024-12-09T14:28:00,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:00,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:00,610 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:28:00,611 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460, hbase.cluster.distributed=false 2024-12-09T14:28:00,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:28:00,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37717 2024-12-09T14:28:00,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37717 2024-12-09T14:28:00,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37717 2024-12-09T14:28:00,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37717 2024-12-09T14:28:00,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37717 2024-12-09T14:28:00,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:00,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:00,643 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:28:00,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:00,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:00,643 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:28:00,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:00,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:28:00,643 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:28:00,644 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:28:00,644 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33665 2024-12-09T14:28:00,646 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33665 connecting to ZooKeeper ensemble=127.0.0.1:59972 2024-12-09T14:28:00,647 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:00,649 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:00,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336650x0, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:28:00,656 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33665-0x1012b9577cf0001 connected 2024-12-09T14:28:00,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:28:00,657 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-09T14:28:00,657 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T14:28:00,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T14:28:00,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:28:00,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33665 2024-12-09T14:28:00,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33665 2024-12-09T14:28:00,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33665 2024-12-09T14:28:00,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33665 2024-12-09T14:28:00,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33665 2024-12-09T14:28:00,676 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f4e784dc7cb5:37717 2024-12-09T14:28:00,676 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:00,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:00,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:00,679 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:00,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T14:28:00,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,683 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on existing znode=/hbase/master 
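The ZKUtil(113) lines above repeatedly report "Set watcher on znode that does not yet exist" for /hbase/running, /hbase/acl and /hbase/master, and the later NodeCreated event for /hbase/master shows such a watcher firing. In plain ZooKeeper terms this is an exists() call that registers a watch even when the node is absent; the sketch below shows that mechanism with the stock ZooKeeper client, as an illustration only, not HBase's ZKWatcher/ZKUtil implementation.

    // Plain ZooKeeper illustration of "Set watcher on znode that does not yet exist":
    // exists() registers the watcher even when the node is absent, so a later
    // create() delivers a NodeCreated event (as seen for /hbase/master above).
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Ensemble address mirrors the clientPort=59972 started above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59972", 30000, event -> { });
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Event " + event.getType() + " on " + event.getPath());
        // Returns null if /hbase/master does not exist yet, but the watch stays
        // registered and fires once for the next create/delete of that path.
        Stat stat = zk.exists("/hbase/master", watcher);
        System.out.println("/hbase/master " + (stat == null ? "absent, watching" : "present"));
      }
    }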
2024-12-09T14:28:00,684 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f4e784dc7cb5,37717,1733754480574 from backup master directory 2024-12-09T14:28:00,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:00,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:00,685 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:28:00,686 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:00,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:00,692 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/hbase.id] with ID: 1a99d77b-b10a-4899-a2fa-6796d744e57a 2024-12-09T14:28:00,692 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/.tmp/hbase.id 2024-12-09T14:28:00,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:28:00,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:28:00,710 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/.tmp/hbase.id]:[hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/hbase.id] 2024-12-09T14:28:00,724 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:00,724 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T14:28:00,726 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
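FSUtils(620), (625) and (634) above create the cluster ID file by writing it to a temporary .tmp location and then moving it to hbase.id, so a reader never observes a half-written ID. A sketch of that write-then-rename pattern with the generic Hadoop FileSystem API follows; it is not the actual FSUtils code, and the paths are shortened stand-ins for the hdfs://localhost:37369/user/jenkins/test-data/... locations in the log.

    // Sketch of the "write to .tmp, then move into place" pattern used above for
    // the cluster ID file. Generic Hadoop FileSystem calls only; paths are
    // stand-ins, not the exact locations from the log.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
        Path dst = new Path("/user/jenkins/test-data/hbase.id");
        // Write the ID (value taken from the log line above) to a temporary file
        // first so readers never see a partially written hbase.id.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("1a99d77b-b10a-4899-a2fa-6796d744e57a".getBytes(StandardCharsets.UTF_8));
        }
        // Then move it to its final location in a single rename.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }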
2024-12-09T14:28:00,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:28:00,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:28:00,743 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:28:00,743 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T14:28:00,744 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:28:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:28:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:28:00,756 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store 2024-12-09T14:28:00,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:28:00,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:28:00,765 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:00,765 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:28:00,765 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:00,766 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:00,766 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:28:00,766 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:00,766 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
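The master:store descriptor logged above spells out concrete per-family settings (VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE). For comparison only, the 'info' family with those same values can be expressed through the public HBase client builders; this is just an illustration of the printed attributes, since the master's local store region is created internally rather than through this API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreLikeDescriptor {
      public static void main(String[] args) {
        // 'info' family mirroring the settings printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .build();

        System.out.println(td);
      }
    }

The 'proc', 'rs', and 'state' families in the same log entry differ only in VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=65536 and no in-memory flag, so they would be three more builder calls of the same shape.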
2024-12-09T14:28:00,766 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754480765Disabling compacts and flushes for region at 1733754480765Disabling writes for close at 1733754480766 (+1 ms)Writing region close event to WAL at 1733754480766Closed at 1733754480766 2024-12-09T14:28:00,767 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/.initializing 2024-12-09T14:28:00,767 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/WALs/f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:00,769 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C37717%2C1733754480574, suffix=, logDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/WALs/f4e784dc7cb5,37717,1733754480574, archiveDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/oldWALs, maxLogs=10 2024-12-09T14:28:00,770 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C37717%2C1733754480574.1733754480770 2024-12-09T14:28:00,775 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/WALs/f4e784dc7cb5,37717,1733754480574/f4e784dc7cb5%2C37717%2C1733754480574.1733754480770 2024-12-09T14:28:00,776 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35885:35885),(127.0.0.1/127.0.0.1:44705:44705)] 2024-12-09T14:28:00,777 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:28:00,777 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:00,777 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,777 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T14:28:00,781 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:00,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T14:28:00,783 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:00,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T14:28:00,785 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:00,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T14:28:00,787 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:00,788 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,789 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,789 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,791 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,791 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,792 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T14:28:00,794 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:00,796 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:28:00,797 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882973, jitterRate=0.12275846302509308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T14:28:00,798 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733754480777Initializing all the Stores at 1733754480779 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754480779Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754480779Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754480779Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754480779Cleaning up temporary data from old regions at 1733754480791 (+12 ms)Region opened successfully at 1733754480797 (+6 ms) 2024-12-09T14:28:00,798 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T14:28:00,802 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@928817a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:28:00,804 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T14:28:00,804 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T14:28:00,804 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T14:28:00,804 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T14:28:00,805 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T14:28:00,806 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T14:28:00,806 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T14:28:00,809 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T14:28:00,811 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T14:28:00,813 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T14:28:00,813 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T14:28:00,814 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T14:28:00,816 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T14:28:00,816 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T14:28:00,817 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T14:28:00,819 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T14:28:00,820 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T14:28:00,825 DEBUG 
[master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T14:28:00,828 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T14:28:00,829 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T14:28:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:28:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:28:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,832 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f4e784dc7cb5,37717,1733754480574, sessionid=0x1012b9577cf0000, setting cluster-up flag (Was=false) 2024-12-09T14:28:00,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,846 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T14:28:00,848 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:00,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:00,861 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T14:28:00,862 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:00,864 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T14:28:00,872 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:00,873 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T14:28:00,873 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T14:28:00,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f4e784dc7cb5,37717,1733754480574 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f4e784dc7cb5:0, corePoolSize=10, maxPoolSize=10 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:28:00,875 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T14:28:00,877 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:00,877 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T14:28:00,878 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733754510878 2024-12-09T14:28:00,878 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,878 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T14:28:00,879 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T14:28:00,879 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T14:28:00,879 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T14:28:00,879 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T14:28:00,879 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T14:28:00,879 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T14:28:00,879 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:00,880 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T14:28:00,880 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T14:28:00,880 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T14:28:00,880 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T14:28:00,881 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T14:28:00,881 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754480881,5,FailOnTimeoutGroup] 2024-12-09T14:28:00,882 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754480881,5,FailOnTimeoutGroup] 2024-12-09T14:28:00,882 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:00,882 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T14:28:00,882 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:00,882 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
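The ChoreService lines above register periodic maintenance tasks (LogsCleaner every 600000 ms, HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, and so on). The scheduling idea itself is just a fixed-rate periodic task; below is a minimal stand-in using plain java.util.concurrent rather than HBase's ChoreService, with a made-up cleaner body:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class LogCleanerChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
        // Same period as the LogsCleaner chore in the log: 600000 ms.
        chorePool.scheduleAtFixedRate(
            // The real cleaner chain runs each configured cleaner (TTL, replication, ...)
            // over the oldWALs directory; this body is a placeholder.
            () -> System.out.println("scanning oldWALs for expired files..."),
            0, 600_000, TimeUnit.MILLISECONDS);
      }
    }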
2024-12-09T14:28:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:28:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:28:00,889 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T14:28:00,889 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460 2024-12-09T14:28:00,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:28:00,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:28:00,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:00,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:28:00,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:28:00,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:00,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:28:00,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:28:00,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:00,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:28:00,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:28:00,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:00,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:28:00,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:28:00,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:00,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:00,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:28:00,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740 2024-12-09T14:28:00,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740 2024-12-09T14:28:00,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:28:00,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:28:00,921 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
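The CompactionConfiguration entries repeated for every store above all carry the same selection knobs: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2 (5.0 off-peak), plus min/max compact sizes. The heart of ratio-based selection is one rule: an older store file only joins a minor compaction if it is no larger than ratio times the combined size of the newer files, otherwise rewriting it costs too much I/O for the gain. A self-contained sketch of that rule, assuming sizes sorted oldest-to-newest; this is a simplification, not the ExploringCompactionPolicy named in the log:

    import java.util.List;

    public class RatioSelectionSketch {
      /**
       * Pick a contiguous tail of the candidate files for compaction.
       * sizes are assumed sorted oldest-to-newest; returns the start index
       * of the files to compact, or -1 if fewer than minFiles qualify.
       */
      static int selectStart(List<Long> sizes, double ratio, int minFiles) {
        int start = 0;
        long sumNewer = sizes.stream().mapToLong(Long::longValue).sum();
        // Skip an old file while it is larger than ratio * (sum of the newer files).
        while (start < sizes.size()) {
          long size = sizes.get(start);
          sumNewer -= size;
          if (size <= ratio * sumNewer) {
            break;
          }
          start++;
        }
        return sizes.size() - start >= minFiles ? start : -1;
      }

      public static void main(String[] args) {
        // With ratio 1.2 the 500-byte file is excluded and the last four are compacted.
        System.out.println(selectStart(List.of(500L, 40L, 30L, 20L, 10L), 1.2, 3));
      }
    }

The FlushLargeStoresPolicy lines nearby follow even simpler arithmetic: with no explicit lower bound configured, the per-family flush threshold is the region memstore flush size divided by the family count, which is where the 32.0 M figure for master:store (134217728 bytes / 4 families = 33554432) and the 16.0 M figure for hbase:meta come from.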
2024-12-09T14:28:00,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:28:00,925 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:28:00,926 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729950, jitterRate=-0.07182176411151886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:28:00,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733754480904Initializing all the Stores at 1733754480906 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754480906Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754480906Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754480906Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754480906Cleaning up temporary data from old regions at 1733754480920 (+14 ms)Region opened successfully at 1733754480927 (+7 ms) 2024-12-09T14:28:00,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:28:00,927 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:28:00,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:28:00,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:28:00,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:28:00,928 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:28:00,928 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754480927Disabling compacts and flushes for region at 1733754480927Disabling writes for close at 1733754480927Writing region 
close event to WAL at 1733754480928 (+1 ms)Closed at 1733754480928 2024-12-09T14:28:00,930 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:00,930 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T14:28:00,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T14:28:00,933 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:28:00,935 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T14:28:00,964 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(746): ClusterId : 1a99d77b-b10a-4899-a2fa-6796d744e57a 2024-12-09T14:28:00,964 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:28:00,967 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:28:00,967 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:28:00,973 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:28:00,974 DEBUG [RS:0;f4e784dc7cb5:33665 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@790f58ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:28:00,987 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f4e784dc7cb5:33665 2024-12-09T14:28:00,987 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:28:00,987 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:28:00,987 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(832): About to register with Master. 
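The MasterProcedureScheduler line above reports "Took xlock for pid=2": the meta-assignment procedure holds an exclusive lock on its target so competing procedures queue behind it, while read-only access can be shared. The actual scheduler keeps its own queue-based lock bookkeeping; the sketch below only illustrates the exclusive-versus-shared idea with a plain ReentrantReadWriteLock:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class TableLockSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      // "xlock": exclusive access while a procedure mutates table/region state.
      void runExclusive(Runnable proc) {
        lock.writeLock().lock();
        try {
          proc.run();
        } finally {
          lock.writeLock().unlock();
        }
      }

      // Shared lock: many readers may inspect state concurrently.
      void runShared(Runnable reader) {
        lock.readLock().lock();
        try {
          reader.run();
        } finally {
          lock.readLock().unlock();
        }
      }

      public static void main(String[] args) {
        TableLockSketch s = new TableLockSketch();
        s.runExclusive(() -> System.out.println("assigning hbase:meta under xlock"));
        s.runShared(() -> System.out.println("reading region state under shared lock"));
      }
    }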
2024-12-09T14:28:00,988 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,37717,1733754480574 with port=33665, startcode=1733754480642 2024-12-09T14:28:00,988 DEBUG [RS:0;f4e784dc7cb5:33665 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:28:00,990 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44399, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:28:00,991 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37717 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:00,991 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37717 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:00,993 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460 2024-12-09T14:28:00,993 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37369 2024-12-09T14:28:00,993 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:28:00,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:28:00,999 DEBUG [RS:0;f4e784dc7cb5:33665 {}] zookeeper.ZKUtil(111): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:00,999 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,33665,1733754480642] 2024-12-09T14:28:00,999 WARN [RS:0;f4e784dc7cb5:33665 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:28:00,999 INFO [RS:0;f4e784dc7cb5:33665 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:28:00,999 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:01,006 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:28:01,010 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:28:01,011 INFO [RS:0;f4e784dc7cb5:33665 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:28:01,011 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
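The RegionServerTracker line above ("RegionServer ephemeral node created") reflects how liveness is tracked: each regionserver creates an ephemeral znode under /hbase/rs, and ZooKeeper removes it automatically when the server's session ends, which is what lets the master notice crashed servers. Registering such a node with the stock ZooKeeper client looks roughly like this (server name and quorum taken from the log; ACLs and error handling simplified):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRsNode {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59972", 30_000, event -> { });

        // Ephemeral: ZooKeeper deletes the node when this session expires or closes,
        // so a crashed regionserver disappears from /hbase/rs without extra cleanup.
        String path = zk.create(
            "/hbase/rs/f4e784dc7cb5,33665,1733754480642",
            new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.EPHEMERAL);
        System.out.println("registered " + path);
      }
    }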
2024-12-09T14:28:01,011 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:28:01,012 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:28:01,012 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,012 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,012 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,012 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,012 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:28:01,013 DEBUG [RS:0;f4e784dc7cb5:33665 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:28:01,019 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
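The RS_* executor services above are small named thread pools keyed by event type, and the chores are fixed-rate periodic tasks. The sketch below is a plain JDK analogy of that pattern, not HBase's own ExecutorService/ChoreService code, included only to make the corePoolSize and period numbers concrete.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ExecutorAnalogy {
      public static void main(String[] args) throws InterruptedException {
        // Analogue of RS_OPEN_REGION (corePoolSize=1, maxPoolSize=1): one worker
        // draining open-region events in submission order.
        ExecutorService openRegionPool = Executors.newFixedThreadPool(1);
        openRegionPool.submit(() -> System.out.println("open region ..."));

        // Analogue of the CompactionChecker chore (period=1000 ms): a fixed-rate task.
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        chores.scheduleAtFixedRate(
            () -> System.out.println("check stores for compaction"), 0, 1000, TimeUnit.MILLISECONDS);

        Thread.sleep(3000);   // let the chore fire a few times, then stop both pools
        chores.shutdownNow();
        openRegionPool.shutdown();
      }
    }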
2024-12-09T14:28:01,019 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,019 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,019 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,019 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,019 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,33665,1733754480642-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:28:01,052 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:28:01,053 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,33665,1733754480642-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,053 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,053 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.Replication(171): f4e784dc7cb5,33665,1733754480642 started 2024-12-09T14:28:01,069 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,070 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,33665,1733754480642, RpcServer on f4e784dc7cb5/172.17.0.3:33665, sessionid=0x1012b9577cf0001 2024-12-09T14:28:01,070 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:28:01,070 DEBUG [RS:0;f4e784dc7cb5:33665 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:01,070 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,33665,1733754480642' 2024-12-09T14:28:01,070 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:28:01,071 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:28:01,072 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:28:01,072 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:28:01,072 DEBUG [RS:0;f4e784dc7cb5:33665 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:01,072 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,33665,1733754480642' 2024-12-09T14:28:01,072 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:28:01,073 DEBUG 
[RS:0;f4e784dc7cb5:33665 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:28:01,073 DEBUG [RS:0;f4e784dc7cb5:33665 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:28:01,073 INFO [RS:0;f4e784dc7cb5:33665 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:28:01,073 INFO [RS:0;f4e784dc7cb5:33665 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T14:28:01,085 WARN [f4e784dc7cb5:37717 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T14:28:01,176 INFO [RS:0;f4e784dc7cb5:33665 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C33665%2C1733754480642, suffix=, logDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642, archiveDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/oldWALs, maxLogs=32 2024-12-09T14:28:01,177 INFO [RS:0;f4e784dc7cb5:33665 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C33665%2C1733754480642.1733754481177 2024-12-09T14:28:01,190 INFO [RS:0;f4e784dc7cb5:33665 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754481177 2024-12-09T14:28:01,197 DEBUG [RS:0;f4e784dc7cb5:33665 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44705:44705),(127.0.0.1/127.0.0.1:35885:35885)] 2024-12-09T14:28:01,336 DEBUG [f4e784dc7cb5:37717 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T14:28:01,336 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:01,338 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,33665,1733754480642, state=OPENING 2024-12-09T14:28:01,341 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T14:28:01,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:01,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:01,344 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:28:01,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:01,344 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:01,344 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,33665,1733754480642}] 2024-12-09T14:28:01,498 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T14:28:01,500 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54607, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T14:28:01,504 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T14:28:01,504 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:28:01,507 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C33665%2C1733754480642.meta, suffix=.meta, logDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642, archiveDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/oldWALs, maxLogs=32 2024-12-09T14:28:01,507 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C33665%2C1733754480642.meta.1733754481507.meta 2024-12-09T14:28:01,519 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.meta.1733754481507.meta 2024-12-09T14:28:01,527 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44705:44705),(127.0.0.1/127.0.0.1:35885:35885)] 2024-12-09T14:28:01,532 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:28:01,532 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T14:28:01,532 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T14:28:01,532 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
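pid=3 (OpenRegionProcedure for 1588230740) is now executing on the region server, so hbase:meta is about to come online. Once it is OPEN, a client can resolve its location through a RegionLocator; a hedged sketch follows, assuming an already-created Connection is passed in (class and method names are illustrative).

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
      static void printMetaLocation(Connection connection) throws IOException {
        try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
          // hbase:meta has a single region (encoded 1588230740), so any row resolves to it.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
          System.out.println("meta hosted on " + loc.getServerName() + ", seqNum=" + loc.getSeqNum());
        }
      }
    }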
2024-12-09T14:28:01,533 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T14:28:01,533 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:01,533 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T14:28:01,533 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T14:28:01,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:28:01,536 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:28:01,536 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:01,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:01,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:28:01,538 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:28:01,538 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:01,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:01,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:28:01,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:28:01,540 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:01,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:01,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:28:01,541 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:28:01,541 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:01,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
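The StoreOpener entries enumerate the four meta column families (info, ns, rep_barrier, table), all with ROW_INDEX_V1 encoding and DefaultMemStore. The same schema can be read back through the Admin API; a small sketch, again assuming an open Connection and with illustrative class/method names.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaSchemaSketch {
      static void printMetaFamilies(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            // Expect info, ns, rep_barrier and table, matching the StoreOpener log lines.
            System.out.println(cf.getNameAsString() + " encoding=" + cf.getDataBlockEncoding());
          }
        }
      }
    }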
2024-12-09T14:28:01,542 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:28:01,543 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740 2024-12-09T14:28:01,544 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740 2024-12-09T14:28:01,545 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:28:01,545 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:28:01,546 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:28:01,548 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:28:01,549 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735051, jitterRate=-0.06533543765544891}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:28:01,549 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T14:28:01,549 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733754481533Writing region info on filesystem at 1733754481533Initializing all the Stores at 1733754481534 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754481534Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754481534Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754481534Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754481534Cleaning up temporary data from old regions at 1733754481545 (+11 ms)Running coprocessor post-open hooks at 1733754481549 (+4 ms)Region opened successfully at 1733754481549 2024-12-09T14:28:01,551 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733754481497 2024-12-09T14:28:01,554 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T14:28:01,554 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T14:28:01,555 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:01,557 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,33665,1733754480642, state=OPEN 2024-12-09T14:28:01,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:28:01,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:28:01,562 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:01,562 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:01,562 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:01,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T14:28:01,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,33665,1733754480642 in 218 msec 2024-12-09T14:28:01,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T14:28:01,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 636 msec 2024-12-09T14:28:01,570 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:01,570 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T14:28:01,572 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:28:01,572 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,33665,1733754480642, seqNum=-1] 2024-12-09T14:28:01,572 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:28:01,574 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60529, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:28:01,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 740 msec 2024-12-09T14:28:01,610 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733754481610, completionTime=-1 2024-12-09T14:28:01,610 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T14:28:01,611 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T14:28:01,613 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T14:28:01,613 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733754541613 2024-12-09T14:28:01,613 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733754601613 2024-12-09T14:28:01,613 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T14:28:01,614 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,37717,1733754480574-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,614 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,37717,1733754480574-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,614 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,37717,1733754480574-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,614 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f4e784dc7cb5:37717, period=300000, unit=MILLISECONDS is enabled. 
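With meta assigned, InitMetaProcedure creates the default and hbase namespaces and the master reports initialization complete (0.937 sec). Listing those namespaces from a client looks roughly like the sketch below (open Connection assumed, names illustrative).

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class NamespaceSketch {
      static void listNamespaces(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
          // After InitMetaProcedure finishes this should print "default" and "hbase".
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }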
2024-12-09T14:28:01,614 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,616 DEBUG [master/f4e784dc7cb5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T14:28:01,618 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,623 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.937sec 2024-12-09T14:28:01,624 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T14:28:01,624 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T14:28:01,624 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T14:28:01,624 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T14:28:01,624 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T14:28:01,624 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,37717,1733754480574-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:28:01,624 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,37717,1733754480574-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T14:28:01,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:01,631 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T14:28:01,631 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T14:28:01,631 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,37717,1733754480574-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:01,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:01,667 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65345c29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:28:01,667 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f4e784dc7cb5,37717,-1 for getting cluster id 2024-12-09T14:28:01,668 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T14:28:01,670 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1a99d77b-b10a-4899-a2fa-6796d744e57a' 2024-12-09T14:28:01,671 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T14:28:01,671 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1a99d77b-b10a-4899-a2fa-6796d744e57a" 2024-12-09T14:28:01,671 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23273074, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:28:01,671 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f4e784dc7cb5,37717,-1] 2024-12-09T14:28:01,671 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T14:28:01,672 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:28:01,673 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40810, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T14:28:01,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b4013d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:28:01,675 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:28:01,676 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,33665,1733754480642, seqNum=-1] 2024-12-09T14:28:01,676 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:28:01,678 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40726, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:28:01,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:01,680 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:01,683 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T14:28:01,683 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T14:28:01,684 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:01,684 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@12bafa01 2024-12-09T14:28:01,684 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T14:28:01,685 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40814, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T14:28:01,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T14:28:01,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T14:28:01,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:28:01,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:01,689 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T14:28:01,689 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:01,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-09T14:28:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:28:01,690 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T14:28:01,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741835_1011 (size=405) 2024-12-09T14:28:01,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741835_1011 (size=405) 2024-12-09T14:28:01,709 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 18181be931f5cacde9a334f6a715a847, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460 2024-12-09T14:28:01,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741836_1012 (size=88) 2024-12-09T14:28:01,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741836_1012 (size=88) 2024-12-09T14:28:01,729 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:01,729 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 18181be931f5cacde9a334f6a715a847, disabling compactions & flushes 2024-12-09T14:28:01,729 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:01,729 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:01,729 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. after waiting 0 ms 2024-12-09T14:28:01,729 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 
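The CreateTableProcedure above was triggered by a createTable request whose descriptor deliberately uses a tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), which is exactly what TableDescriptorChecker warned about a few entries earlier. A client-side sketch of an equivalent request follows; the table name, family name and size values are copied from the log, everything else is illustrative.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTestTableSketch {
      static void createTable(Connection connection) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Tiny limits force frequent flushes and rolls in the test; they are what
            // trip the TableDescriptorChecker warnings seen in the log.
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .build();
        try (Admin admin = connection.getAdmin()) {
          admin.createTable(desc);
        }
      }
    }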
2024-12-09T14:28:01,729 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:01,729 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 18181be931f5cacde9a334f6a715a847: Waiting for close lock at 1733754481729Disabling compacts and flushes for region at 1733754481729Disabling writes for close at 1733754481729Writing region close event to WAL at 1733754481729Closed at 1733754481729 2024-12-09T14:28:01,731 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T14:28:01,732 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733754481731"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733754481731"}]},"ts":"1733754481731"} 2024-12-09T14:28:01,739 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T14:28:01,743 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T14:28:01,743 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754481743"}]},"ts":"1733754481743"} 2024-12-09T14:28:01,748 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-09T14:28:01,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=18181be931f5cacde9a334f6a715a847, ASSIGN}] 2024-12-09T14:28:01,750 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=18181be931f5cacde9a334f6a715a847, ASSIGN 2024-12-09T14:28:01,751 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=18181be931f5cacde9a334f6a715a847, ASSIGN; state=OFFLINE, location=f4e784dc7cb5,33665,1733754480642; forceNewPlan=false, retain=false 2024-12-09T14:28:01,902 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=18181be931f5cacde9a334f6a715a847, regionState=OPENING, regionLocation=f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:01,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=18181be931f5cacde9a334f6a715a847, ASSIGN because future has completed 2024-12-09T14:28:01,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 18181be931f5cacde9a334f6a715a847, server=f4e784dc7cb5,33665,1733754480642}] 2024-12-09T14:28:02,062 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:02,062 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 18181be931f5cacde9a334f6a715a847, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:28:02,063 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,063 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:02,063 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,063 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,064 INFO [StoreOpener-18181be931f5cacde9a334f6a715a847-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,066 INFO [StoreOpener-18181be931f5cacde9a334f6a715a847-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18181be931f5cacde9a334f6a715a847 columnFamilyName info 2024-12-09T14:28:02,066 DEBUG [StoreOpener-18181be931f5cacde9a334f6a715a847-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:02,066 INFO [StoreOpener-18181be931f5cacde9a334f6a715a847-1 {}] regionserver.HStore(327): Store=18181be931f5cacde9a334f6a715a847/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:02,067 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,067 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,068 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,068 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,068 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,070 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,072 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:28:02,073 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 18181be931f5cacde9a334f6a715a847; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778239, jitterRate=-0.010418176651000977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T14:28:02,073 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:02,074 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 18181be931f5cacde9a334f6a715a847: Running coprocessor pre-open hook at 1733754482063Writing region info on filesystem at 1733754482063Initializing all the Stores at 1733754482064 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754482064Cleaning up temporary data from old regions at 1733754482068 (+4 ms)Running coprocessor post-open hooks at 1733754482073 (+5 ms)Region opened successfully at 1733754482074 (+1 ms) 2024-12-09T14:28:02,075 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847., pid=6, masterSystemTime=1733754482058 2024-12-09T14:28:02,078 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:02,078 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:02,079 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=18181be931f5cacde9a334f6a715a847, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:02,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 18181be931f5cacde9a334f6a715a847, server=f4e784dc7cb5,33665,1733754480642 because future has completed 2024-12-09T14:28:02,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T14:28:02,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 18181be931f5cacde9a334f6a715a847, server=f4e784dc7cb5,33665,1733754480642 in 178 msec 2024-12-09T14:28:02,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T14:28:02,089 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=18181be931f5cacde9a334f6a715a847, ASSIGN in 337 msec 2024-12-09T14:28:02,089 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T14:28:02,090 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754482089"}]},"ts":"1733754482089"} 2024-12-09T14:28:02,092 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-09T14:28:02,093 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T14:28:02,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 407 msec 2024-12-09T14:28:02,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:02,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:03,324 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:28:03,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:03,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:03,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:04,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:04,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:05,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:05,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:06,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:06,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:07,007 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T14:28:07,007 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-09T14:28:07,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:07,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:07,821 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T14:28:07,821 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T14:28:07,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:28:07,822 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T14:28:07,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T14:28:07,822 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T14:28:07,823 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:07,823 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T14:28:08,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:08,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:09,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:09,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:10,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:10,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:11,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:11,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:28:11,738 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T14:28:11,738 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-09T14:28:11,742 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:11,742 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:11,746 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847., hostname=f4e784dc7cb5,33665,1733754480642, seqNum=2] 2024-12-09T14:28:11,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:11,760 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T14:28:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T14:28:11,761 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T14:28:11,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T14:28:11,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33665 {}] 
regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T14:28:11,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:11,929 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 18181be931f5cacde9a334f6a715a847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T14:28:11,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/5a8674bd0cf04b7a91fdad067c3b5365 is 1080, key is row0001/info:/1733754491747/Put/seqid=0 2024-12-09T14:28:11,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741837_1013 (size=6033) 2024-12-09T14:28:11,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741837_1013 (size=6033) 2024-12-09T14:28:11,989 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/5a8674bd0cf04b7a91fdad067c3b5365 2024-12-09T14:28:11,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/5a8674bd0cf04b7a91fdad067c3b5365 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5a8674bd0cf04b7a91fdad067c3b5365 2024-12-09T14:28:12,007 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5a8674bd0cf04b7a91fdad067c3b5365, entries=1, sequenceid=5, filesize=5.9 K 2024-12-09T14:28:12,009 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 18181be931f5cacde9a334f6a715a847 in 80ms, sequenceid=5, compaction requested=false 2024-12-09T14:28:12,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 18181be931f5cacde9a334f6a715a847: 2024-12-09T14:28:12,009 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:12,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T14:28:12,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T14:28:12,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T14:28:12,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 252 msec 2024-12-09T14:28:12,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 265 msec 2024-12-09T14:28:12,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:12,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:13,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:13,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:14,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:14,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:15,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:15,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:16,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:16,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:17,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:17,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:18,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:18,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:19,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:19,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:20,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:20,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:21,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:21,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:21,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T14:28:21,798 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T14:28:21,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:21,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-09T14:28:21,804 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T14:28:21,805 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T14:28:21,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T14:28:21,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33665 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-09T14:28:21,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 
2024-12-09T14:28:21,960 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 18181be931f5cacde9a334f6a715a847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T14:28:21,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/c4356f0aa16f4161afdd9e090763d2bc is 1080, key is row0002/info:/1733754501799/Put/seqid=0 2024-12-09T14:28:21,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741838_1014 (size=6033) 2024-12-09T14:28:21,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741838_1014 (size=6033) 2024-12-09T14:28:21,972 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/c4356f0aa16f4161afdd9e090763d2bc 2024-12-09T14:28:21,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/c4356f0aa16f4161afdd9e090763d2bc as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/c4356f0aa16f4161afdd9e090763d2bc 2024-12-09T14:28:21,986 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/c4356f0aa16f4161afdd9e090763d2bc, entries=1, sequenceid=9, filesize=5.9 K 2024-12-09T14:28:21,988 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 18181be931f5cacde9a334f6a715a847 in 27ms, sequenceid=9, compaction requested=false 2024-12-09T14:28:21,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 18181be931f5cacde9a334f6a715a847: 2024-12-09T14:28:21,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 
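[Editor's note] The two flushes above (pid=8 and pid=10) each persist a single ~1 KB cell of region 18181be931f5cacde9a334f6a715a847 into a new ~5.9 K HFile. A minimal sketch of how a client-side test might produce this write-then-flush pattern with the public HBase client API follows; the table name, the empty qualifier, and the "info" family match what the log shows, while the connection setup, row key, and value size are illustrative assumptions, not taken from the actual test source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Write one cell into the 'info' family with an empty qualifier,
      // matching the "row0002/info:/..." key printed by HFileWriterImpl above.
      Put put = new Put(Bytes.toBytes("row0002"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
      table.put(put);
      // Admin.flush drives the FlushTableProcedure / FlushRegionProcedure pair
      // seen in the log (pid=9 / pid=10) and returns once the memstore has been
      // written out as a new HFile under the region's info/ directory.
      admin.flush(tn);
    }
  }
}
```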
2024-12-09T14:28:21,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-09T14:28:21,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-09T14:28:21,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T14:28:21,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-12-09T14:28:22,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-12-09T14:28:22,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:22,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:23,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:23,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 after 68052ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
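Editor's note: the repeated WARN entries above come from the WAL close path polling HDFS lease recovery roughly once per second for two old WAL files; every attempt fails immediately because the underlying DFSClient has already been shut down, so the wrapped cause is always java.io.IOException: Filesystem closed (the InvocationTargetException wrapper appears because RecoverLeaseFSUtils invokes isFileClosed reflectively, as the stack traces show). The Java sketch below illustrates the general recoverLease/isFileClosed polling pattern against a DistributedFileSystem; it is a simplified illustration under stated assumptions, not HBase's RecoverLeaseFSUtils code, and the NameNode URI, WAL path, and timeout are made-up example values.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryExample {
        // Ask the NameNode to recover the lease on a file (for example an old WAL), then poll
        // isFileClosed() until it reports true or the deadline passes. Both calls fail with
        // IOException("Filesystem closed") once the DFSClient behind the filesystem has been
        // closed, which is the failure repeated in the log above.
        static boolean recoverLease(DistributedFileSystem dfs, Path file, long timeoutMs)
                throws IOException, InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            boolean closed = dfs.recoverLease(file);      // true means the file is already closed
            while (!closed && System.currentTimeMillis() < deadline) {
                Thread.sleep(1000L);                      // the log shows roughly 1 s between attempts
                closed = dfs.isFileClosed(file);          // poll instead of re-triggering recovery
            }
            return closed;
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Hypothetical NameNode address and WAL path, for illustration only.
            Path wal = new Path("hdfs://localhost:8020/hbase/oldWALs/example.wal");
            try (FileSystem fs = wal.getFileSystem(conf)) {
                if (fs instanceof DistributedFileSystem) {
                    System.out.println("closed: " + recoverLease((DistributedFileSystem) fs, wal, 60_000L));
                }
            }
        }
    }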
2024-12-09T14:28:23,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:23,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta after 68041ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T14:28:24,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:24,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:25,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:25,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:26,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:26,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:27,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:27,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:28,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:28,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:29,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:29,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:30,556 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T14:28:30,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:30,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:31,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:31,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T14:28:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T14:28:31,908 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T14:28:31,911 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C33665%2C1733754480642.1733754511910
2024-12-09T14:28:31,918 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:31,918 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:31,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:31,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:31,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:31,919 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754481177 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754511910
2024-12-09T14:28:31,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741833_1009 (size=5546)
2024-12-09T14:28:31,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741833_1009 (size=5546)
2024-12-09T14:28:31,926 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44705:44705),(127.0.0.1/127.0.0.1:35885:35885)]
2024-12-09T14:28:31,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T14:28:31,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T14:28:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T14:28:31,930 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T14:28:31,931 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T14:28:31,932 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T14:28:32,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33665 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-09T14:28:32,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.
2024-12-09T14:28:32,086 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 18181be931f5cacde9a334f6a715a847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T14:28:32,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/5c6da2972d3441b4853ddc402809ac16 is 1080, key is row0003/info:/1733754511909/Put/seqid=0
2024-12-09T14:28:32,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741840_1016 (size=6033)
2024-12-09T14:28:32,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741840_1016 (size=6033)
2024-12-09T14:28:32,105 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/5c6da2972d3441b4853ddc402809ac16
2024-12-09T14:28:32,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/5c6da2972d3441b4853ddc402809ac16 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5c6da2972d3441b4853ddc402809ac16
2024-12-09T14:28:32,118 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5c6da2972d3441b4853ddc402809ac16, entries=1, sequenceid=13, filesize=5.9 K
2024-12-09T14:28:32,119 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 18181be931f5cacde9a334f6a715a847 in 33ms, sequenceid=13, compaction requested=true
2024-12-09T14:28:32,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 18181be931f5cacde9a334f6a715a847:
2024-12-09T14:28:32,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.
2024-12-09T14:28:32,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-09T14:28:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-09T14:28:32,124 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-09T14:28:32,124 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec
2024-12-09T14:28:32,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 198 msec
2024-12-09T14:28:32,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T14:28:32,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[An identical WARN and stack trace is logged immediately afterwards (2024-12-09T14:28:32,659) for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta, and both entries then repeat unchanged at roughly 1-second intervals through 2024-12-09T14:28:41,664.]
2024-12-09T14:28:41,631 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T14:28:41,631 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
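The recurring "Filesystem closed" cause comes from the HDFS client: once a DFS client instance has been closed, every further call is rejected in DFSClient.checkOpen. Here the failing paths point at hdfs://localhost:43639 while the active cluster runs at hdfs://localhost:37369, so the retries appear to be hitting the filesystem of an already torn-down mini cluster. A minimal sketch of that failure mode; the namenode URI and path below are placeholders, not taken from the test:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode URI; any reachable HDFS instance would do.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/some-wal-file"); // placeholder path
    dfs.close();
    // After close(), DFSClient.checkOpen rejects every call, so this throws
    // java.io.IOException: Filesystem closed -- the same cause seen in the WARN entries above.
    dfs.isFileClosed(wal);
  }
}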
2024-12-09T14:28:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T14:28:42,027 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T14:28:42,028 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T14:28:42,029 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T14:28:42,029 DEBUG [Time-limited test {}] regionserver.HStore(1541): 18181be931f5cacde9a334f6a715a847/info is initiating minor compaction (all files)
2024-12-09T14:28:42,029 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T14:28:42,029 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T14:28:42,029 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 18181be931f5cacde9a334f6a715a847/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.
2024-12-09T14:28:42,029 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5a8674bd0cf04b7a91fdad067c3b5365, hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/c4356f0aa16f4161afdd9e090763d2bc, hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5c6da2972d3441b4853ddc402809ac16] into tmpdir=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp, totalSize=17.7 K
2024-12-09T14:28:42,030 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5a8674bd0cf04b7a91fdad067c3b5365, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733754491747
2024-12-09T14:28:42,030 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c4356f0aa16f4161afdd9e090763d2bc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733754501799
2024-12-09T14:28:42,031 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5c6da2972d3441b4853ddc402809ac16, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733754511909
2024-12-09T14:28:42,041 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 18181be931f5cacde9a334f6a715a847#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T14:28:42,041 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/6a0b456136064e439bc61ee2fcb0af0a is 1080, key is row0001/info:/1733754491747/Put/seqid=0
2024-12-09T14:28:42,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741841_1017 (size=8296)
2024-12-09T14:28:42,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741841_1017 (size=8296)
2024-12-09T14:28:42,052 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/6a0b456136064e439bc61ee2fcb0af0a as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/6a0b456136064e439bc61ee2fcb0af0a
2024-12-09T14:28:42,058 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 18181be931f5cacde9a334f6a715a847/info of 18181be931f5cacde9a334f6a715a847 into 6a0b456136064e439bc61ee2fcb0af0a(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T14:28:42,058 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 18181be931f5cacde9a334f6a715a847:
2024-12-09T14:28:42,061 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C33665%2C1733754480642.1733754522060
2024-12-09T14:28:42,066 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:42,066 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:42,066 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:42,066 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:42,066 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:28:42,066 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754511910 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754522060
2024-12-09T14:28:42,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741839_1015 (size=2520)
2024-12-09T14:28:42,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741839_1015 (size=2520)
2024-12-09T14:28:42,071 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44705:44705),(127.0.0.1/127.0.0.1:35885:35885)]
2024-12-09T14:28:42,074 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754481177 to hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/oldWALs/f4e784dc7cb5%2C33665%2C1733754480642.1733754481177
2024-12-09T14:28:42,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T14:28:42,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T14:28:42,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-09T14:28:42,077 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T14:28:42,078 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T14:28:42,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T14:28:42,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33665 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-09T14:28:42,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.
2024-12-09T14:28:42,232 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 18181be931f5cacde9a334f6a715a847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T14:28:42,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/31841614fb3f490eaaf072297061df29 is 1080, key is row0000/info:/1733754522059/Put/seqid=0
2024-12-09T14:28:42,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741843_1019 (size=6033)
2024-12-09T14:28:42,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741843_1019 (size=6033)
2024-12-09T14:28:42,244 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/31841614fb3f490eaaf072297061df29
2024-12-09T14:28:42,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/31841614fb3f490eaaf072297061df29 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/31841614fb3f490eaaf072297061df29
2024-12-09T14:28:42,257 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/31841614fb3f490eaaf072297061df29, entries=1, sequenceid=18, filesize=5.9 K
2024-12-09T14:28:42,258 INFO [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 18181be931f5cacde9a334f6a715a847 in 27ms, sequenceid=18, compaction requested=false
2024-12-09T14:28:42,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 18181be931f5cacde9a334f6a715a847:
2024-12-09T14:28:42,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.
2024-12-09T14:28:42,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-09T14:28:42,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-09T14:28:42,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-09T14:28:42,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec
2024-12-09T14:28:42,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec
11 more 2024-12-09T14:28:42,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:43,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:43,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:44,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:44,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:45,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:45,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:46,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:46,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:47,063 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 18181be931f5cacde9a334f6a715a847, had cached 0 bytes from a total of 14329 2024-12-09T14:28:47,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:47,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:48,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:48,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:49,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:49,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:50,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:50,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:51,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:51,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37717 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T14:28:52,088 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T14:28:52,090 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C33665%2C1733754480642.1733754532090 2024-12-09T14:28:52,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,098 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,099 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754522060 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754532090 2024-12-09T14:28:52,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741842_1018 (size=2026) 2024-12-09T14:28:52,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741842_1018 (size=2026) 2024-12-09T14:28:52,101 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/WALs/f4e784dc7cb5,33665,1733754480642/f4e784dc7cb5%2C33665%2C1733754480642.1733754511910 to hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/oldWALs/f4e784dc7cb5%2C33665%2C1733754480642.1733754511910 2024-12-09T14:28:52,106 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44705:44705),(127.0.0.1/127.0.0.1:35885:35885)] 2024-12-09T14:28:52,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T14:28:52,107 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T14:28:52,107 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:28:52,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:28:52,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:28:52,107 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T14:28:52,107 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T14:28:52,107 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1615903126, stopped=false 2024-12-09T14:28:52,107 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f4e784dc7cb5,37717,1733754480574 2024-12-09T14:28:52,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:28:52,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:52,110 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:28:52,110 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T14:28:52,110 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:28:52,110 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:28:52,110 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,33665,1733754480642' ***** 2024-12-09T14:28:52,110 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:28:52,111 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:28:52,111 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(3091): Received CLOSE for 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f4e784dc7cb5:33665. 
2024-12-09T14:28:52,111 DEBUG [RS:0;f4e784dc7cb5:33665 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:28:52,111 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 18181be931f5cacde9a334f6a715a847, disabling compactions & flushes 2024-12-09T14:28:52,111 DEBUG [RS:0;f4e784dc7cb5:33665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:28:52,111 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:52,111 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:52,111 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. after waiting 0 ms 2024-12-09T14:28:52,111 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:28:52,111 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T14:28:52,112 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T14:28:52,112 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 18181be931f5cacde9a334f6a715a847 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T14:28:52,112 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T14:28:52,112 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 18181be931f5cacde9a334f6a715a847=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.} 2024-12-09T14:28:52,112 DEBUG [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 18181be931f5cacde9a334f6a715a847 2024-12-09T14:28:52,112 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:28:52,112 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:28:52,112 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:28:52,112 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:28:52,112 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:28:52,112 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-09T14:28:52,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:28:52,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:52,115 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:28:52,117 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/8bc8fc50ae2f4ee0b5cd3bd91ea58bf7 is 1080, key is row0001/info:/1733754532089/Put/seqid=0 2024-12-09T14:28:52,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741845_1021 (size=6033) 2024-12-09T14:28:52,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741845_1021 
(size=6033) 2024-12-09T14:28:52,122 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/8bc8fc50ae2f4ee0b5cd3bd91ea58bf7 2024-12-09T14:28:52,129 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/.tmp/info/8bc8fc50ae2f4ee0b5cd3bd91ea58bf7 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/8bc8fc50ae2f4ee0b5cd3bd91ea58bf7 2024-12-09T14:28:52,130 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/info/943aea2daa7c4c509a66ffbf27e4ac96 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847./info:regioninfo/1733754482079/Put/seqid=0 2024-12-09T14:28:52,134 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/8bc8fc50ae2f4ee0b5cd3bd91ea58bf7, entries=1, sequenceid=22, filesize=5.9 K 2024-12-09T14:28:52,135 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 18181be931f5cacde9a334f6a715a847 in 24ms, sequenceid=22, compaction requested=true 2024-12-09T14:28:52,136 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5a8674bd0cf04b7a91fdad067c3b5365, hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/c4356f0aa16f4161afdd9e090763d2bc, hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5c6da2972d3441b4853ddc402809ac16] to archive 2024-12-09T14:28:52,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741846_1022 (size=7308) 2024-12-09T14:28:52,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741846_1022 (size=7308) 2024-12-09T14:28:52,137 DEBUG 
[StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T14:28:52,137 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/info/943aea2daa7c4c509a66ffbf27e4ac96 2024-12-09T14:28:52,139 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5a8674bd0cf04b7a91fdad067c3b5365 to hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5a8674bd0cf04b7a91fdad067c3b5365 2024-12-09T14:28:52,140 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/c4356f0aa16f4161afdd9e090763d2bc to hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/c4356f0aa16f4161afdd9e090763d2bc 2024-12-09T14:28:52,141 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5c6da2972d3441b4853ddc402809ac16 to hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/info/5c6da2972d3441b4853ddc402809ac16 2024-12-09T14:28:52,142 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f4e784dc7cb5:37717 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-09T14:28:52,142 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5a8674bd0cf04b7a91fdad067c3b5365=6033, c4356f0aa16f4161afdd9e090763d2bc=6033, 5c6da2972d3441b4853ddc402809ac16=6033] 2024-12-09T14:28:52,147 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/18181be931f5cacde9a334f6a715a847/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-09T14:28:52,148 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 2024-12-09T14:28:52,148 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 18181be931f5cacde9a334f6a715a847: Waiting for close lock at 1733754532111Running coprocessor pre-close hooks at 1733754532111Disabling compacts and flushes for region at 1733754532111Disabling writes for close at 1733754532111Obtaining lock to block concurrent updates at 1733754532112 (+1 ms)Preparing flush snapshotting stores in 18181be931f5cacde9a334f6a715a847 at 1733754532112Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733754532112Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. at 1733754532113 (+1 ms)Flushing 18181be931f5cacde9a334f6a715a847/info: creating writer at 1733754532113Flushing 18181be931f5cacde9a334f6a715a847/info: appending metadata at 1733754532116 (+3 ms)Flushing 18181be931f5cacde9a334f6a715a847/info: closing flushed file at 1733754532117 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15789afb: reopening flushed file at 1733754532128 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 18181be931f5cacde9a334f6a715a847 in 24ms, sequenceid=22, compaction requested=true at 1733754532135 (+7 ms)Writing region close event to WAL at 1733754532144 (+9 ms)Running coprocessor post-close hooks at 1733754532148 (+4 ms)Closed at 1733754532148 2024-12-09T14:28:52,148 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733754481686.18181be931f5cacde9a334f6a715a847. 
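The records above show the region's remaining ~1.05 KB memstore being flushed to a new store file on close, the three already-compacted store files being moved to the archive directory, and the close journal being written. For reference, the same kind of "Flushing ... / Finished flush" records can also be triggered on demand through the client Admin API; a hedged sketch (connection configuration is assumed, only the table name is taken from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a memstore flush for the test table seen in the records above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```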
2024-12-09T14:28:52,157 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/ns/5303911c5490470c972b18df73a2a3f6 is 43, key is default/ns:d/1733754481579/Put/seqid=0 2024-12-09T14:28:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741847_1023 (size=5153) 2024-12-09T14:28:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741847_1023 (size=5153) 2024-12-09T14:28:52,161 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/ns/5303911c5490470c972b18df73a2a3f6 2024-12-09T14:28:52,180 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/table/b58dc946722344f981bb8a8354440300 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733754482089/Put/seqid=0 2024-12-09T14:28:52,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741848_1024 (size=5508) 2024-12-09T14:28:52,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741848_1024 (size=5508) 2024-12-09T14:28:52,185 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/table/b58dc946722344f981bb8a8354440300 2024-12-09T14:28:52,189 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/info/943aea2daa7c4c509a66ffbf27e4ac96 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/info/943aea2daa7c4c509a66ffbf27e4ac96 2024-12-09T14:28:52,193 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/info/943aea2daa7c4c509a66ffbf27e4ac96, entries=10, sequenceid=11, filesize=7.1 K 2024-12-09T14:28:52,194 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/ns/5303911c5490470c972b18df73a2a3f6 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/ns/5303911c5490470c972b18df73a2a3f6 2024-12-09T14:28:52,198 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/ns/5303911c5490470c972b18df73a2a3f6, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T14:28:52,199 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/.tmp/table/b58dc946722344f981bb8a8354440300 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/table/b58dc946722344f981bb8a8354440300 2024-12-09T14:28:52,203 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/table/b58dc946722344f981bb8a8354440300, entries=2, sequenceid=11, filesize=5.4 K 2024-12-09T14:28:52,204 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=11, compaction requested=false 2024-12-09T14:28:52,208 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T14:28:52,209 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:28:52,209 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:28:52,209 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754532112Running coprocessor pre-close hooks at 1733754532112Disabling compacts and flushes for region at 1733754532112Disabling writes for close at 1733754532112Obtaining lock to block concurrent updates at 1733754532112Preparing flush snapshotting stores in 1588230740 at 1733754532112Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733754532112Flushing stores of hbase:meta,,1.1588230740 at 1733754532113 (+1 ms)Flushing 1588230740/info: creating writer at 1733754532113Flushing 1588230740/info: appending metadata at 1733754532130 (+17 ms)Flushing 1588230740/info: closing flushed file at 1733754532130Flushing 1588230740/ns: creating writer at 1733754532142 (+12 ms)Flushing 1588230740/ns: appending metadata at 1733754532156 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733754532156Flushing 1588230740/table: creating writer at 1733754532166 (+10 ms)Flushing 1588230740/table: appending metadata at 1733754532180 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733754532180Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@de99710: reopening flushed file at 1733754532189 (+9 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3327fab9: reopening flushed file at 1733754532194 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@636587cd: reopening flushed file at 1733754532199 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=11, compaction requested=false at 1733754532204 (+5 ms)Writing region close event to WAL at 1733754532205 (+1 ms)Running coprocessor post-close hooks at 1733754532209 (+4 ms)Closed at 1733754532209 2024-12-09T14:28:52,209 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T14:28:52,312 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,33665,1733754480642; all regions closed. 2024-12-09T14:28:52,313 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,313 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,313 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,313 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,313 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741834_1010 (size=3306) 2024-12-09T14:28:52,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741834_1010 (size=3306) 2024-12-09T14:28:52,318 DEBUG [RS:0;f4e784dc7cb5:33665 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/oldWALs 2024-12-09T14:28:52,318 INFO [RS:0;f4e784dc7cb5:33665 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C33665%2C1733754480642.meta:.meta(num 1733754481507) 2024-12-09T14:28:52,318 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,319 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,319 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741844_1020 (size=1252) 2024-12-09T14:28:52,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741844_1020 (size=1252) 2024-12-09T14:28:52,323 DEBUG [RS:0;f4e784dc7cb5:33665 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/oldWALs 2024-12-09T14:28:52,323 INFO [RS:0;f4e784dc7cb5:33665 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C33665%2C1733754480642:(num 1733754532090) 2024-12-09T14:28:52,323 DEBUG [RS:0;f4e784dc7cb5:33665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:28:52,323 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:28:52,323 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:28:52,324 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T14:28:52,324 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:28:52,324 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T14:28:52,324 INFO [RS:0;f4e784dc7cb5:33665 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33665 2024-12-09T14:28:52,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:28:52,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,33665,1733754480642 2024-12-09T14:28:52,326 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:28:52,327 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,33665,1733754480642] 2024-12-09T14:28:52,330 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,33665,1733754480642 already deleted, retry=false 2024-12-09T14:28:52,330 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,33665,1733754480642 expired; onlineServers=0 2024-12-09T14:28:52,330 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f4e784dc7cb5,37717,1733754480574' ***** 2024-12-09T14:28:52,330 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T14:28:52,330 INFO [M:0;f4e784dc7cb5:37717 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:28:52,330 INFO [M:0;f4e784dc7cb5:37717 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:28:52,330 DEBUG [M:0;f4e784dc7cb5:37717 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T14:28:52,330 DEBUG [M:0;f4e784dc7cb5:37717 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T14:28:52,330 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
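The master learns of the region server's exit through ZooKeeper: the server's ephemeral znode under /hbase/rs disappears, the NodeDeleted event above fires, and RegionServerTracker processes the expiration. A minimal plain-ZooKeeper sketch of watching that znode (this is not HBase's ZKWatcher/RegionServerTracker code; the quorum address and znode path are copied from the log, everything else is illustrative):

```java
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log records above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59972", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        // Roughly what RegionServerTracker reacts to: the ephemeral RS znode is gone.
        System.out.println("RegionServer znode deleted: " + event.getPath());
      }
    });
    // Registers a one-shot watch on the region server's ephemeral znode.
    zk.exists("/hbase/rs/f4e784dc7cb5,33665,1733754480642", true);
    Thread.sleep(60_000); // keep the process alive long enough to observe the event (sketch only)
    zk.close();
  }
}
```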
2024-12-09T14:28:52,330 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754480881 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754480881,5,FailOnTimeoutGroup] 2024-12-09T14:28:52,330 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754480881 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754480881,5,FailOnTimeoutGroup] 2024-12-09T14:28:52,330 INFO [M:0;f4e784dc7cb5:37717 {}] hbase.ChoreService(370): Chore service for: master/f4e784dc7cb5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T14:28:52,330 INFO [M:0;f4e784dc7cb5:37717 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:28:52,330 DEBUG [M:0;f4e784dc7cb5:37717 {}] master.HMaster(1795): Stopping service threads 2024-12-09T14:28:52,330 INFO [M:0;f4e784dc7cb5:37717 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T14:28:52,331 INFO [M:0;f4e784dc7cb5:37717 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:28:52,331 INFO [M:0;f4e784dc7cb5:37717 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T14:28:52,331 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T14:28:52,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T14:28:52,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:52,332 DEBUG [M:0;f4e784dc7cb5:37717 {}] zookeeper.ZKUtil(347): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T14:28:52,332 WARN [M:0;f4e784dc7cb5:37717 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T14:28:52,332 INFO [M:0;f4e784dc7cb5:37717 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/.lastflushedseqids 2024-12-09T14:28:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741849_1025 (size=130) 2024-12-09T14:28:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741849_1025 (size=130) 2024-12-09T14:28:52,338 INFO [M:0;f4e784dc7cb5:37717 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T14:28:52,338 INFO [M:0;f4e784dc7cb5:37717 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T14:28:52,338 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:28:52,338 INFO [M:0;f4e784dc7cb5:37717 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:52,338 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:52,338 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:28:52,338 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:52,338 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.58 KB heapSize=54.99 KB 2024-12-09T14:28:52,354 DEBUG [M:0;f4e784dc7cb5:37717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba914af180d5466f892c5c71ae6740f3 is 82, key is hbase:meta,,1/info:regioninfo/1733754481555/Put/seqid=0 2024-12-09T14:28:52,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741850_1026 (size=5672) 2024-12-09T14:28:52,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741850_1026 (size=5672) 2024-12-09T14:28:52,359 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba914af180d5466f892c5c71ae6740f3 2024-12-09T14:28:52,379 DEBUG [M:0;f4e784dc7cb5:37717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99427d74313e4c76b30a77908b4c7346 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733754482094/Put/seqid=0 2024-12-09T14:28:52,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741851_1027 (size=7822) 2024-12-09T14:28:52,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741851_1027 (size=7822) 2024-12-09T14:28:52,386 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.98 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99427d74313e4c76b30a77908b4c7346 2024-12-09T14:28:52,390 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 99427d74313e4c76b30a77908b4c7346 2024-12-09T14:28:52,405 DEBUG [M:0;f4e784dc7cb5:37717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a880a6a5ec6e450fb0a8f1edb70a0936 is 69, key is f4e784dc7cb5,33665,1733754480642/rs:state/1733754480991/Put/seqid=0 
2024-12-09T14:28:52,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741852_1028 (size=5156) 2024-12-09T14:28:52,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741852_1028 (size=5156) 2024-12-09T14:28:52,410 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a880a6a5ec6e450fb0a8f1edb70a0936 2024-12-09T14:28:52,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:28:52,428 INFO [RS:0;f4e784dc7cb5:33665 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:28:52,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33665-0x1012b9577cf0001, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:28:52,428 INFO [RS:0;f4e784dc7cb5:33665 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,33665,1733754480642; zookeeper connection closed. 2024-12-09T14:28:52,428 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e3ff4f2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e3ff4f2 2024-12-09T14:28:52,428 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T14:28:52,429 DEBUG [M:0;f4e784dc7cb5:37717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ed01e1da3c9e4dd7ada1d61e8d9cbf4c is 52, key is load_balancer_on/state:d/1733754481682/Put/seqid=0 2024-12-09T14:28:52,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741853_1029 (size=5056) 2024-12-09T14:28:52,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741853_1029 (size=5056) 2024-12-09T14:28:52,434 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ed01e1da3c9e4dd7ada1d61e8d9cbf4c 2024-12-09T14:28:52,439 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba914af180d5466f892c5c71ae6740f3 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba914af180d5466f892c5c71ae6740f3 2024-12-09T14:28:52,444 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba914af180d5466f892c5c71ae6740f3, entries=8, sequenceid=121, filesize=5.5 K 2024-12-09T14:28:52,445 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99427d74313e4c76b30a77908b4c7346 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/99427d74313e4c76b30a77908b4c7346 2024-12-09T14:28:52,449 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 99427d74313e4c76b30a77908b4c7346 2024-12-09T14:28:52,449 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/99427d74313e4c76b30a77908b4c7346, entries=14, sequenceid=121, filesize=7.6 K 2024-12-09T14:28:52,450 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a880a6a5ec6e450fb0a8f1edb70a0936 as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a880a6a5ec6e450fb0a8f1edb70a0936 2024-12-09T14:28:52,455 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a880a6a5ec6e450fb0a8f1edb70a0936, entries=1, sequenceid=121, filesize=5.0 K 2024-12-09T14:28:52,456 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ed01e1da3c9e4dd7ada1d61e8d9cbf4c as hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ed01e1da3c9e4dd7ada1d61e8d9cbf4c 2024-12-09T14:28:52,461 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37369/user/jenkins/test-data/a178bc92-c114-43c3-7dd2-9e9944980460/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ed01e1da3c9e4dd7ada1d61e8d9cbf4c, entries=1, sequenceid=121, filesize=4.9 K 2024-12-09T14:28:52,462 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false 2024-12-09T14:28:52,464 INFO [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
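Each flushed family above is first written under the region's .tmp directory and then "committed" into the store directory; at the filesystem level that commit is essentially a rename within the same HDFS instance (HRegionFileSystem adds validation and bookkeeping on top). A rough sketch of the underlying operation with the plain Hadoop FileSystem API (paths shortened and illustrative; only the filesystem URI and file name are taken from the log):

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37369"), new Configuration());
    // Illustrative stand-ins for the master store's .tmp and final store-file paths.
    Path tmp = new Path("/data/master/store/.tmp/ba914af180d5466f892c5c71ae6740f3");
    Path dst = new Path("/data/master/store/info/ba914af180d5466f892c5c71ae6740f3");
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
  }
}
```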
2024-12-09T14:28:52,464 DEBUG [M:0;f4e784dc7cb5:37717 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754532338Disabling compacts and flushes for region at 1733754532338Disabling writes for close at 1733754532338Obtaining lock to block concurrent updates at 1733754532338Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733754532338Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44629, getHeapSize=56248, getOffHeapSize=0, getCellsCount=140 at 1733754532338Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733754532339 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733754532339Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733754532354 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733754532354Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733754532364 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733754532378 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733754532379 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733754532390 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733754532404 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733754532404Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733754532414 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733754532429 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733754532429Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10a9aec4: reopening flushed file at 1733754532438 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3644f36: reopening flushed file at 1733754532444 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4626059d: reopening flushed file at 1733754532449 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ac2586d: reopening flushed file at 1733754532455 (+6 ms)Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false at 1733754532462 (+7 ms)Writing region close event to WAL at 1733754532464 (+2 ms)Closed at 1733754532464 2024-12-09T14:28:52,464 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,464 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,465 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,465 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,465 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:28:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39499 is added to blk_1073741830_1006 (size=53026) 2024-12-09T14:28:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46833 is added to blk_1073741830_1006 (size=53026) 2024-12-09T14:28:52,468 INFO [M:0;f4e784dc7cb5:37717 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T14:28:52,468 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T14:28:52,468 INFO [M:0;f4e784dc7cb5:37717 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37717 2024-12-09T14:28:52,468 INFO [M:0;f4e784dc7cb5:37717 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:28:52,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:28:52,570 INFO [M:0;f4e784dc7cb5:37717 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:28:52,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37717-0x1012b9577cf0000, quorum=127.0.0.1:59972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:28:52,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b8edabe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:28:52,573 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3838d9cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:28:52,573 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:28:52,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@739c2ff2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:28:52,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fc981fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir/,STOPPED} 2024-12-09T14:28:52,575 WARN [BP-1705338099-172.17.0.3-1733754479705 heartbeating to localhost/127.0.0.1:37369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:28:52,575 WARN [BP-1705338099-172.17.0.3-1733754479705 heartbeating to localhost/127.0.0.1:37369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1705338099-172.17.0.3-1733754479705 (Datanode Uuid 1630dcff-49cc-4890-a289-40a2429bd4aa) service to localhost/127.0.0.1:37369 2024-12-09T14:28:52,575 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T14:28:52,575 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:28:52,575 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data3/current/BP-1705338099-172.17.0.3-1733754479705 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:28:52,576 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data4/current/BP-1705338099-172.17.0.3-1733754479705 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:28:52,576 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:28:52,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ac76b28{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:28:52,579 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@371e2711{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:28:52,579 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:28:52,579 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45ae7776{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:28:52,579 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48bfafbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir/,STOPPED} 2024-12-09T14:28:52,581 WARN [BP-1705338099-172.17.0.3-1733754479705 heartbeating to localhost/127.0.0.1:37369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:28:52,581 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
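After the datanodes and web contexts are stopped, the ResourceChecker summary that follows ("Thread=206 (was 179)") lists every potentially hanging thread together with its stack. A similar dump can be produced with plain JDK APIs; a small sketch, unrelated to the ResourceChecker implementation itself:

```java
import java.util.Map;

public class ThreadDumpSketch {
  public static void main(String[] args) {
    // Snapshot of every live thread and its current stack, roughly the raw
    // material behind the "Potentially hanging thread: ..." entries below.
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    System.out.println("Live threads: " + stacks.size());
    for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
      System.out.println("Thread: " + e.getKey().getName() + " (state=" + e.getKey().getState() + ")");
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    at " + frame);
      }
    }
  }
}
```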
2024-12-09T14:28:52,581 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:28:52,581 WARN [BP-1705338099-172.17.0.3-1733754479705 heartbeating to localhost/127.0.0.1:37369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1705338099-172.17.0.3-1733754479705 (Datanode Uuid 891e3663-3bb8-4529-9689-0cc80f24421c) service to localhost/127.0.0.1:37369 2024-12-09T14:28:52,581 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data1/current/BP-1705338099-172.17.0.3-1733754479705 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:28:52,582 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/cluster_fb35d4cc-b662-2717-f5d5-2b238b688b19/data/data2/current/BP-1705338099-172.17.0.3-1733754479705 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:28:52,582 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:28:52,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12208e1b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:28:52,589 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56e526c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:28:52,589 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:28:52,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@521d1c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:28:52,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@252e2abb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir/,STOPPED} 2024-12-09T14:28:52,596 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T14:28:52,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T14:28:52,625 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:37369 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37369 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/f4e784dc7cb5:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37369 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37369 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37369 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37369 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37369 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37369 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37369 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=230 (was 225) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4842 (was 5685) 2024-12-09T14:28:52,635 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=230, ProcessCount=11, AvailableMemoryMB=4842 2024-12-09T14:28:52,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T14:28:52,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.log.dir so I do NOT create it in target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab 2024-12-09T14:28:52,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7027f7b5-25e8-6e73-d94a-be63a7ab746c/hadoop.tmp.dir so I do NOT create it in target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab 2024-12-09T14:28:52,635 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf, deleteOnExit=true 2024-12-09T14:28:52,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T14:28:52,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/test.cache.data in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T14:28:52,636 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T14:28:52,636 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/nfs.dump.dir in system properties and HBase conf 2024-12-09T14:28:52,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/java.io.tmpdir in system properties and HBase conf 2024-12-09T14:28:52,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:28:52,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T14:28:52,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T14:28:52,650 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:28:52,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:52,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:52,711 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:28:52,715 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:28:52,716 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:28:52,716 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:28:52,716 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:28:52,716 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:28:52,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fda4535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:28:52,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b767eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:28:52,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bdca924{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/java.io.tmpdir/jetty-localhost-42351-hadoop-hdfs-3_4_1-tests_jar-_-any-10582829093423903378/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:28:52,833 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27fb1a0a{HTTP/1.1, (http/1.1)}{localhost:42351} 2024-12-09T14:28:52,833 INFO [Time-limited test {}] server.Server(415): Started @240691ms 2024-12-09T14:28:52,846 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:28:53,027 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:28:53,032 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:28:53,033 INFO [regionserver/f4e784dc7cb5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:28:53,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:28:53,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:28:53,034 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:28:53,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a5ea7cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:28:53,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d8c7847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:28:53,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4881a2ed{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/java.io.tmpdir/jetty-localhost-41105-hadoop-hdfs-3_4_1-tests_jar-_-any-1209872152864337265/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:28:53,178 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@374d3611{HTTP/1.1, (http/1.1)}{localhost:41105} 2024-12-09T14:28:53,178 INFO [Time-limited test {}] server.Server(415): Started @241036ms 2024-12-09T14:28:53,179 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:28:53,213 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:28:53,216 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:28:53,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:28:53,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:28:53,217 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:28:53,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2de80e16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:28:53,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5689196f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:28:53,291 WARN [Thread-1948 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data2/current/BP-1146165504-172.17.0.3-1733754532655/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:53,291 WARN [Thread-1947 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data1/current/BP-1146165504-172.17.0.3-1733754532655/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:53,309 WARN [Thread-1926 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:28:53,312 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1210c677050bfe7 with lease ID 0x7d8c9515ec2755a9: Processing first storage report for DS-bb7f4688-8414-421b-9c84-37ccd11c5df2 from datanode DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3e99010d-90a7-43df-b927-c1c74f03c18b, infoPort=40043, infoSecurePort=0, ipcPort=34723, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655) 2024-12-09T14:28:53,312 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1210c677050bfe7 with lease ID 0x7d8c9515ec2755a9: from storage DS-bb7f4688-8414-421b-9c84-37ccd11c5df2 node DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3e99010d-90a7-43df-b927-c1c74f03c18b, infoPort=40043, infoSecurePort=0, ipcPort=34723, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:53,312 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1210c677050bfe7 with lease ID 0x7d8c9515ec2755a9: Processing first storage report for DS-eaaff040-78d6-42fd-a04a-69b19f8e491f from datanode DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3e99010d-90a7-43df-b927-c1c74f03c18b, infoPort=40043, infoSecurePort=0, ipcPort=34723, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655) 2024-12-09T14:28:53,312 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1210c677050bfe7 with lease ID 0x7d8c9515ec2755a9: from storage DS-eaaff040-78d6-42fd-a04a-69b19f8e491f node DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3e99010d-90a7-43df-b927-c1c74f03c18b, infoPort=40043, infoSecurePort=0, ipcPort=34723, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:53,355 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f87fe6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/java.io.tmpdir/jetty-localhost-42147-hadoop-hdfs-3_4_1-tests_jar-_-any-7732473830697471551/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:28:53,355 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64d2170c{HTTP/1.1, (http/1.1)}{localhost:42147} 2024-12-09T14:28:53,355 INFO [Time-limited test {}] server.Server(415): Started @241213ms 2024-12-09T14:28:53,357 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T14:28:53,462 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data3/current/BP-1146165504-172.17.0.3-1733754532655/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:53,462 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data4/current/BP-1146165504-172.17.0.3-1733754532655/current, will proceed with Du for space computation calculation, 2024-12-09T14:28:53,485 WARN [Thread-1962 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:28:53,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74d53cfee387a7ca with lease ID 0x7d8c9515ec2755aa: Processing first storage report for DS-185c80b2-2806-4cca-bef0-f5373e932167 from datanode DatanodeRegistration(127.0.0.1:35043, datanodeUuid=3dd3c2fb-0b42-4fbe-b24e-afd1caa41720, infoPort=41803, infoSecurePort=0, ipcPort=46677, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655) 2024-12-09T14:28:53,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74d53cfee387a7ca with lease ID 0x7d8c9515ec2755aa: from storage DS-185c80b2-2806-4cca-bef0-f5373e932167 node DatanodeRegistration(127.0.0.1:35043, datanodeUuid=3dd3c2fb-0b42-4fbe-b24e-afd1caa41720, infoPort=41803, infoSecurePort=0, ipcPort=46677, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:53,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74d53cfee387a7ca with lease ID 0x7d8c9515ec2755aa: Processing first storage report for DS-6f70677a-7818-4964-9495-784ff2ab5340 from datanode DatanodeRegistration(127.0.0.1:35043, datanodeUuid=3dd3c2fb-0b42-4fbe-b24e-afd1caa41720, infoPort=41803, infoSecurePort=0, ipcPort=46677, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655) 2024-12-09T14:28:53,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74d53cfee387a7ca with lease ID 0x7d8c9515ec2755aa: from storage DS-6f70677a-7818-4964-9495-784ff2ab5340 node DatanodeRegistration(127.0.0.1:35043, datanodeUuid=3dd3c2fb-0b42-4fbe-b24e-afd1caa41720, infoPort=41803, infoSecurePort=0, ipcPort=46677, storageInfo=lv=-57;cid=testClusterID;nsid=864500658;c=1733754532655), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:28:53,585 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab 2024-12-09T14:28:53,588 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/zookeeper_0, clientPort=55084, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T14:28:53,590 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55084 2024-12-09T14:28:53,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:53,591 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:53,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:28:53,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:28:53,601 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b with version=8 2024-12-09T14:28:53,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase-staging 2024-12-09T14:28:53,603 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:28:53,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:53,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:53,603 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:28:53,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:53,603 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:28:53,603 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T14:28:53,603 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:28:53,604 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46445 2024-12-09T14:28:53,605 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46445 connecting to ZooKeeper ensemble=127.0.0.1:55084 2024-12-09T14:28:53,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:464450x0, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:28:53,611 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46445-0x1012b9646f20000 connected 2024-12-09T14:28:53,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:53,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:53,632 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:28:53,633 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b, hbase.cluster.distributed=false 2024-12-09T14:28:53,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:28:53,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46445 2024-12-09T14:28:53,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46445 2024-12-09T14:28:53,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46445 2024-12-09T14:28:53,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46445 2024-12-09T14:28:53,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46445 2024-12-09T14:28:53,657 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:28:53,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:53,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:53,657 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:28:53,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:28:53,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:28:53,657 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:28:53,658 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:28:53,658 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34999 2024-12-09T14:28:53,660 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34999 connecting to ZooKeeper ensemble=127.0.0.1:55084 2024-12-09T14:28:53,660 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:53,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:53,662 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:53,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:349990x0, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:28:53,668 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:349990x0, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:28:53,668 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T14:28:53,671 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34999-0x1012b9646f20001 connected 2024-12-09T14:28:53,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:28:53,672 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T14:28:53,672 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T14:28:53,673 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:28:53,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34999 2024-12-09T14:28:53,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34999 2024-12-09T14:28:53,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34999 2024-12-09T14:28:53,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34999 2024-12-09T14:28:53,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34999 2024-12-09T14:28:53,688 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f4e784dc7cb5:46445 2024-12-09T14:28:53,689 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:53,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:53,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:53,693 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:53,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T14:28:53,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,696 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on existing znode=/hbase/master 
2024-12-09T14:28:53,696 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f4e784dc7cb5,46445,1733754533603 from backup master directory 2024-12-09T14:28:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:53,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:28:53,697 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:28:53,698 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:53,704 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/hbase.id] with ID: 8335abcd-83b2-4bb0-bb9f-ef9f55c514a6 2024-12-09T14:28:53,704 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/.tmp/hbase.id 2024-12-09T14:28:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:28:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:28:53,713 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/.tmp/hbase.id]:[hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/hbase.id] 2024-12-09T14:28:53,729 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:53,729 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T14:28:53,730 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T14:28:53,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:28:53,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:28:53,744 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:28:53,745 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T14:28:53,745 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:28:53,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:28:53,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:28:53,759 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store 2024-12-09T14:28:53,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:28:53,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:28:53,767 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:53,767 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:28:53,767 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:53,767 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:53,767 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:28:53,767 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:28:53,767 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T14:28:53,767 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754533767Disabling compacts and flushes for region at 1733754533767Disabling writes for close at 1733754533767Writing region close event to WAL at 1733754533767Closed at 1733754533767 2024-12-09T14:28:53,768 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/.initializing 2024-12-09T14:28:53,768 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/WALs/f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:53,771 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C46445%2C1733754533603, suffix=, logDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/WALs/f4e784dc7cb5,46445,1733754533603, archiveDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/oldWALs, maxLogs=10 2024-12-09T14:28:53,772 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C46445%2C1733754533603.1733754533771 2024-12-09T14:28:53,780 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/WALs/f4e784dc7cb5,46445,1733754533603/f4e784dc7cb5%2C46445%2C1733754533603.1733754533771 2024-12-09T14:28:53,798 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:40043:40043)] 2024-12-09T14:28:53,802 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:28:53,802 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:53,803 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,803 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,805 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T14:28:53,807 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:53,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T14:28:53,809 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:53,810 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T14:28:53,811 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:53,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T14:28:53,812 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:53,813 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,814 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,814 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,815 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,815 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,815 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T14:28:53,816 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:28:53,819 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:28:53,819 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758412, jitterRate=-0.03562965989112854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T14:28:53,820 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733754533803Initializing all the Stores at 1733754533804 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754533804Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754533804Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754533804Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754533804Cleaning up temporary data from old regions at 1733754533815 (+11 ms)Region opened successfully at 1733754533820 (+5 ms) 2024-12-09T14:28:53,822 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T14:28:53,826 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@369fab3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:28:53,826 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T14:28:53,827 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T14:28:53,827 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T14:28:53,827 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T14:28:53,827 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T14:28:53,828 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T14:28:53,828 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T14:28:53,829 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T14:28:53,830 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T14:28:53,832 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T14:28:53,833 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T14:28:53,833 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T14:28:53,835 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T14:28:53,835 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T14:28:53,836 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T14:28:53,837 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T14:28:53,837 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T14:28:53,838 DEBUG 
[master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T14:28:53,840 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T14:28:53,846 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T14:28:53,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:28:53,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:28:53,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,848 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f4e784dc7cb5,46445,1733754533603, sessionid=0x1012b9646f20000, setting cluster-up flag (Was=false) 2024-12-09T14:28:53,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,857 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T14:28:53,858 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:53,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:53,867 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T14:28:53,868 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:53,870 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T14:28:53,871 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:53,872 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T14:28:53,872 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T14:28:53,872 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f4e784dc7cb5,46445,1733754533603 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f4e784dc7cb5:0, corePoolSize=10, maxPoolSize=10 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:28:53,873 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T14:28:53,875 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733754563875 2024-12-09T14:28:53,875 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T14:28:53,876 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T14:28:53,876 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T14:28:53,876 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T14:28:53,876 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T14:28:53,876 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T14:28:53,876 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(746): ClusterId : 8335abcd-83b2-4bb0-bb9f-ef9f55c514a6 2024-12-09T14:28:53,877 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:28:53,877 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:53,877 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T14:28:53,878 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,878 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T14:28:53,878 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,880 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:28:53,880 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:28:53,882 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:28:53,882 DEBUG [RS:0;f4e784dc7cb5:34999 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69e7c0cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:28:53,882 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T14:28:53,882 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T14:28:53,883 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T14:28:53,886 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T14:28:53,886 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T14:28:53,887 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754533886,5,FailOnTimeoutGroup] 2024-12-09T14:28:53,890 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754533887,5,FailOnTimeoutGroup] 2024-12-09T14:28:53,890 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,890 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T14:28:53,890 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,890 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T14:28:53,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:28:53,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:28:53,898 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T14:28:53,898 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b 2024-12-09T14:28:53,901 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f4e784dc7cb5:34999 2024-12-09T14:28:53,901 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:28:53,901 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:28:53,901 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T14:28:53,902 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,46445,1733754533603 with port=34999, startcode=1733754533657 2024-12-09T14:28:53,903 DEBUG [RS:0;f4e784dc7cb5:34999 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:28:53,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:28:53,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:28:53,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:53,915 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56477, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:28:53,916 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46445 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:53,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:28:53,916 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46445 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:53,917 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:28:53,917 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,919 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b 2024-12-09T14:28:53,919 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46419 2024-12-09T14:28:53,919 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:28:53,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:53,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:28:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:28:53,921 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:28:53,921 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:53,922 DEBUG [RS:0;f4e784dc7cb5:34999 {}] zookeeper.ZKUtil(111): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:53,922 WARN [RS:0;f4e784dc7cb5:34999 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T14:28:53,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:28:53,922 INFO [RS:0;f4e784dc7cb5:34999 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:28:53,922 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:53,923 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,34999,1733754533657] 2024-12-09T14:28:53,923 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:28:53,923 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:53,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:28:53,925 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:28:53,925 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:53,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:53,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:28:53,926 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:28:53,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740 2024-12-09T14:28:53,928 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:28:53,928 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740 2024-12-09T14:28:53,928 INFO [RS:0;f4e784dc7cb5:34999 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:28:53,928 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,930 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:28:53,930 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:28:53,930 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:28:53,930 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:28:53,932 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:28:53,932 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T14:28:53,932 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,932 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,932 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,932 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,932 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,932 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:28:53,932 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,933 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,933 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,933 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,933 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,933 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:28:53,933 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:28:53,933 DEBUG [RS:0;f4e784dc7cb5:34999 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:28:53,933 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,933 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,933 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,933 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T14:28:53,933 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,933 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34999,1733754533657-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:28:53,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:28:53,947 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:28:53,948 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700704, jitterRate=-0.10900978744029999}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:28:53,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733754533912Initializing all the Stores at 1733754533914 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754533914Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754533915 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754533915Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754533915Cleaning up temporary data from old regions at 1733754533930 (+15 ms)Region opened successfully at 1733754533949 (+19 ms) 2024-12-09T14:28:53,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:28:53,949 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:28:53,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:28:53,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:28:53,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-09T14:28:53,951 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:28:53,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754533949Disabling compacts and flushes for region at 1733754533949Disabling writes for close at 1733754533949Writing region close event to WAL at 1733754533951 (+2 ms)Closed at 1733754533951 2024-12-09T14:28:53,952 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:53,953 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T14:28:53,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T14:28:53,957 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:28:53,960 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T14:28:53,962 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:28:53,962 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,34999,1733754533657-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,962 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:53,962 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.Replication(171): f4e784dc7cb5,34999,1733754533657 started 2024-12-09T14:28:53,984 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:28:53,984 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,34999,1733754533657, RpcServer on f4e784dc7cb5/172.17.0.3:34999, sessionid=0x1012b9646f20001 2024-12-09T14:28:53,984 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:28:53,984 DEBUG [RS:0;f4e784dc7cb5:34999 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:53,984 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,34999,1733754533657' 2024-12-09T14:28:53,984 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:28:53,985 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:28:53,986 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:28:53,986 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:28:53,986 DEBUG [RS:0;f4e784dc7cb5:34999 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:53,986 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,34999,1733754533657' 2024-12-09T14:28:53,986 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:28:53,986 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:28:53,987 DEBUG [RS:0;f4e784dc7cb5:34999 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:28:53,987 INFO [RS:0;f4e784dc7cb5:34999 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:28:53,987 INFO [RS:0;f4e784dc7cb5:34999 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T14:28:54,089 INFO [RS:0;f4e784dc7cb5:34999 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C34999%2C1733754533657, suffix=, logDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657, archiveDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/oldWALs, maxLogs=32 2024-12-09T14:28:54,089 INFO [RS:0;f4e784dc7cb5:34999 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C34999%2C1733754533657.1733754534089 2024-12-09T14:28:54,095 INFO [RS:0;f4e784dc7cb5:34999 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754534089 2024-12-09T14:28:54,098 DEBUG [RS:0;f4e784dc7cb5:34999 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40043:40043),(127.0.0.1/127.0.0.1:41803:41803)] 2024-12-09T14:28:54,111 DEBUG [f4e784dc7cb5:46445 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T14:28:54,111 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:54,112 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,34999,1733754533657, state=OPENING 2024-12-09T14:28:54,114 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T14:28:54,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:54,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:28:54,116 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:28:54,117 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:54,117 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:54,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,34999,1733754533657}] 2024-12-09T14:28:54,270 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T14:28:54,272 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60665, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T14:28:54,275 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T14:28:54,275 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:28:54,277 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C34999%2C1733754533657.meta, suffix=.meta, logDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657, archiveDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/oldWALs, maxLogs=32 2024-12-09T14:28:54,277 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C34999%2C1733754533657.meta.1733754534277.meta 2024-12-09T14:28:54,281 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.meta.1733754534277.meta 2024-12-09T14:28:54,282 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:40043:40043)] 2024-12-09T14:28:54,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:28:54,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T14:28:54,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T14:28:54,283 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T14:28:54,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T14:28:54,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:54,283 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T14:28:54,284 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T14:28:54,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:28:54,285 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:28:54,286 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:54,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:54,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:28:54,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:28:54,287 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:54,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:54,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:28:54,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:28:54,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:54,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:28:54,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:28:54,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:28:54,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:54,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T14:28:54,289 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:28:54,290 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740 2024-12-09T14:28:54,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740 2024-12-09T14:28:54,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:28:54,291 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:28:54,292 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:28:54,293 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:28:54,293 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747527, jitterRate=-0.0494714081287384}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:28:54,294 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T14:28:54,294 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733754534284Writing region info on filesystem at 1733754534284Initializing all the Stores at 1733754534284Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754534284Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754534284Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754534284Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754534284Cleaning up temporary data from old regions at 1733754534291 (+7 ms)Running coprocessor post-open hooks at 1733754534294 (+3 ms)Region opened successfully at 1733754534294 2024-12-09T14:28:54,295 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733754534269 2024-12-09T14:28:54,297 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T14:28:54,297 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T14:28:54,298 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:54,299 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,34999,1733754533657, state=OPEN 2024-12-09T14:28:54,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:28:54,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:28:54,304 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:54,304 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:54,304 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:28:54,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T14:28:54,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,34999,1733754533657 in 187 msec 2024-12-09T14:28:54,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T14:28:54,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 354 msec 2024-12-09T14:28:54,310 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:28:54,310 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T14:28:54,312 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:28:54,312 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,34999,1733754533657, seqNum=-1] 2024-12-09T14:28:54,312 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:28:54,313 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56905, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:28:54,318 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 446 msec 2024-12-09T14:28:54,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733754534318, completionTime=-1 2024-12-09T14:28:54,318 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T14:28:54,318 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T14:28:54,320 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T14:28:54,320 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733754594320 2024-12-09T14:28:54,320 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733754654320 2024-12-09T14:28:54,320 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T14:28:54,321 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,46445,1733754533603-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:54,321 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,46445,1733754533603-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:54,321 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,46445,1733754533603-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:54,321 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f4e784dc7cb5:46445, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:28:54,321 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:54,321 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T14:28:54,322 DEBUG [master/f4e784dc7cb5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.627sec 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,46445,1733754533603-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:28:54,325 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,46445,1733754533603-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T14:28:54,327 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T14:28:54,328 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T14:28:54,328 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,46445,1733754533603-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T14:28:54,377 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ced388d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:28:54,377 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f4e784dc7cb5,46445,-1 for getting cluster id 2024-12-09T14:28:54,377 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T14:28:54,379 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8335abcd-83b2-4bb0-bb9f-ef9f55c514a6' 2024-12-09T14:28:54,379 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T14:28:54,380 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8335abcd-83b2-4bb0-bb9f-ef9f55c514a6" 2024-12-09T14:28:54,380 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31402044, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:28:54,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f4e784dc7cb5,46445,-1] 2024-12-09T14:28:54,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T14:28:54,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:28:54,382 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41440, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T14:28:54,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72925ee1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:28:54,384 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:28:54,385 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,34999,1733754533657, seqNum=-1] 2024-12-09T14:28:54,385 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:28:54,386 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41394, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:28:54,388 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:54,388 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:28:54,390 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T14:28:54,391 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T14:28:54,391 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is f4e784dc7cb5,46445,1733754533603 2024-12-09T14:28:54,391 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5bc5c616 2024-12-09T14:28:54,392 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T14:28:54,392 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41448, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T14:28:54,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46445 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T14:28:54,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46445 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T14:28:54,393 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46445 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:28:54,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46445 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-09T14:28:54,396 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T14:28:54,396 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:54,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46445 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-09T14:28:54,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46445 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:28:54,397 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T14:28:54,403 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741835_1011 (size=381) 2024-12-09T14:28:54,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741835_1011 (size=381) 2024-12-09T14:28:54,405 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0862a744e65ec9e18a19a29534f5321e, NAME => 'TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b 2024-12-09T14:28:54,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741836_1012 (size=64) 2024-12-09T14:28:54,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741836_1012 (size=64) 2024-12-09T14:28:54,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:54,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0862a744e65ec9e18a19a29534f5321e, disabling compactions & flushes 2024-12-09T14:28:54,411 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:28:54,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:28:54,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. after waiting 0 ms 2024-12-09T14:28:54,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:28:54,411 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 
2024-12-09T14:28:54,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0862a744e65ec9e18a19a29534f5321e: Waiting for close lock at 1733754534411Disabling compacts and flushes for region at 1733754534411Disabling writes for close at 1733754534411Writing region close event to WAL at 1733754534411Closed at 1733754534411 2024-12-09T14:28:54,413 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T14:28:54,413 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733754534413"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733754534413"}]},"ts":"1733754534413"} 2024-12-09T14:28:54,415 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T14:28:54,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T14:28:54,416 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754534416"}]},"ts":"1733754534416"} 2024-12-09T14:28:54,418 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-09T14:28:54,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, ASSIGN}] 2024-12-09T14:28:54,419 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, ASSIGN 2024-12-09T14:28:54,420 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, ASSIGN; state=OFFLINE, location=f4e784dc7cb5,34999,1733754533657; forceNewPlan=false, retain=false 2024-12-09T14:28:54,571 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0862a744e65ec9e18a19a29534f5321e, regionState=OPENING, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:54,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, ASSIGN because future has completed 2024-12-09T14:28:54,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0862a744e65ec9e18a19a29534f5321e, 
server=f4e784dc7cb5,34999,1733754533657}]
2024-12-09T14:28:54,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T14:28:54,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T14:28:54,731 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:28:54,731 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0862a744e65ec9e18a19a29534f5321e, NAME => 'TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:28:54,731 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,731 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:28:54,732 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,732 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,733 INFO [StoreOpener-0862a744e65ec9e18a19a29534f5321e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,734 INFO [StoreOpener-0862a744e65ec9e18a19a29534f5321e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0862a744e65ec9e18a19a29534f5321e columnFamilyName info 2024-12-09T14:28:54,734 DEBUG [StoreOpener-0862a744e65ec9e18a19a29534f5321e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:28:54,735 INFO [StoreOpener-0862a744e65ec9e18a19a29534f5321e-1 {}] regionserver.HStore(327): Store=0862a744e65ec9e18a19a29534f5321e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:28:54,735 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,735 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,736 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,736 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,736 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,737 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,739 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:28:54,740 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0862a744e65ec9e18a19a29534f5321e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861869, jitterRate=0.09592385590076447}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T14:28:54,740 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:28:54,740 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0862a744e65ec9e18a19a29534f5321e: Running coprocessor pre-open hook at 1733754534732Writing 
region info on filesystem at 1733754534732Initializing all the Stores at 1733754534732Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754534732Cleaning up temporary data from old regions at 1733754534736 (+4 ms)Running coprocessor post-open hooks at 1733754534740 (+4 ms)Region opened successfully at 1733754534740 2024-12-09T14:28:54,741 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., pid=6, masterSystemTime=1733754534727 2024-12-09T14:28:54,743 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:28:54,743 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:28:54,744 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0862a744e65ec9e18a19a29534f5321e, regionState=OPEN, openSeqNum=2, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:28:54,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 because future has completed 2024-12-09T14:28:54,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T14:28:54,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 in 173 msec 2024-12-09T14:28:54,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T14:28:54,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, ASSIGN in 332 msec 2024-12-09T14:28:54,754 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T14:28:54,754 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733754534754"}]},"ts":"1733754534754"} 2024-12-09T14:28:54,756 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-09T14:28:54,757 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T14:28:54,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 364 msec 2024-12-09T14:28:55,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:55,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:56,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:56,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:57,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:57,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:57,681 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:28:57,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,685 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,685 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,685 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,714 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:28:57,821 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T14:28:57,822 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T14:28:57,822 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T14:28:58,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:58,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:59,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:59,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:28:59,926 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T14:28:59,927 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-09T14:29:00,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:00,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:01,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:01,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:02,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:02,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:03,324 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:29:03,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:03,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:03,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:04,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46445 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T14:29:04,468 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-09T14:29:04,468 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-09T14:29:04,471 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-09T14:29:04,471 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 
2024-12-09T14:29:04,475 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2] 2024-12-09T14:29:04,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:04,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0862a744e65ec9e18a19a29534f5321e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:29:04,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/85483f5423534826b82c4e6365f65431 is 1080, key is row0001/info:/1733754544479/Put/seqid=0 2024-12-09T14:29:04,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741837_1013 (size=12509) 2024-12-09T14:29:04,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741837_1013 (size=12509) 2024-12-09T14:29:04,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/85483f5423534826b82c4e6365f65431 2024-12-09T14:29:04,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/85483f5423534826b82c4e6365f65431 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/85483f5423534826b82c4e6365f65431 2024-12-09T14:29:04,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T14:29:04,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41394 deadline: 1733754554554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:04,562 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:04,563 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:04,563 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 because the exception is null or not the one we care about 2024-12-09T14:29:04,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/85483f5423534826b82c4e6365f65431, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T14:29:04,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 0862a744e65ec9e18a19a29534f5321e in 70ms, sequenceid=11, compaction requested=false 2024-12-09T14:29:04,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:04,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:04,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:05,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:05,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:06,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:06,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:07,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:07,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:08,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:08,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:09,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:09,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:10,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:10,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:11,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:11,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:12,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:12,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:13,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:13,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:14,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:14,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0862a744e65ec9e18a19a29534f5321e 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T14:29:14,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/bce17a6b75ed4bcfb8fce2d270057052 is 1080, key is row0008/info:/1733754544502/Put/seqid=0 2024-12-09T14:29:14,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741838_1014 (size=29761) 2024-12-09T14:29:14,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741838_1014 (size=29761) 2024-12-09T14:29:14,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/bce17a6b75ed4bcfb8fce2d270057052 2024-12-09T14:29:14,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/bce17a6b75ed4bcfb8fce2d270057052 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052 2024-12-09T14:29:14,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052, entries=23, sequenceid=37, filesize=29.1 K 2024-12-09T14:29:14,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 0862a744e65ec9e18a19a29534f5321e in 23ms, sequenceid=37, compaction requested=false 2024-12-09T14:29:14,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:14,632 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-09T14:29:14,632 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:14,632 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052 because midkey is the same as first or last row 2024-12-09T14:29:14,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:14,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:15,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:15,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:16,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:16,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0862a744e65ec9e18a19a29534f5321e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:29:16,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/01ed64984bf843a28c34b4f5e84366f2 is 1080, key is row0031/info:/1733754554610/Put/seqid=0 2024-12-09T14:29:16,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741839_1015 (size=12509) 2024-12-09T14:29:16,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741839_1015 (size=12509) 2024-12-09T14:29:16,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/01ed64984bf843a28c34b4f5e84366f2 2024-12-09T14:29:16,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/01ed64984bf843a28c34b4f5e84366f2 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/01ed64984bf843a28c34b4f5e84366f2 2024-12-09T14:29:16,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/01ed64984bf843a28c34b4f5e84366f2, entries=7, sequenceid=47, filesize=12.2 K 2024-12-09T14:29:16,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 0862a744e65ec9e18a19a29534f5321e in 33ms, sequenceid=47, compaction requested=true 2024-12-09T14:29:16,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:16,654 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-09T14:29:16,654 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:16,654 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052 because midkey is the same as first or last row 2024-12-09T14:29:16,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0862a744e65ec9e18a19a29534f5321e:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-09T14:29:16,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:16,655 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:29:16,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:16,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0862a744e65ec9e18a19a29534f5321e 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T14:29:16,656 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:29:16,656 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): 0862a744e65ec9e18a19a29534f5321e/info is initiating minor compaction (all files) 2024-12-09T14:29:16,656 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0862a744e65ec9e18a19a29534f5321e/info in TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:29:16,657 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/85483f5423534826b82c4e6365f65431, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/01ed64984bf843a28c34b4f5e84366f2] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp, totalSize=53.5 K 2024-12-09T14:29:16,657 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 85483f5423534826b82c4e6365f65431, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733754544479 2024-12-09T14:29:16,658 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting bce17a6b75ed4bcfb8fce2d270057052, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733754544502 2024-12-09T14:29:16,658 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01ed64984bf843a28c34b4f5e84366f2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733754554610 2024-12-09T14:29:16,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/3ddc170903104dada354117d7dc91164 is 1080, key is row0038/info:/1733754556622/Put/seqid=0 
2024-12-09T14:29:16,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741840_1016 (size=24376) 2024-12-09T14:29:16,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741840_1016 (size=24376) 2024-12-09T14:29:16,673 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0862a744e65ec9e18a19a29534f5321e#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:16,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=68 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/3ddc170903104dada354117d7dc91164 2024-12-09T14:29:16,673 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/1d5296ef97744a9791ee8bfd5ca93dc8 is 1080, key is row0001/info:/1733754544479/Put/seqid=0 2024-12-09T14:29:16,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:16,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741841_1017 (size=44978) 2024-12-09T14:29:16,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741841_1017 (size=44978) 2024-12-09T14:29:16,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/3ddc170903104dada354117d7dc91164 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/3ddc170903104dada354117d7dc91164 2024-12-09T14:29:16,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:16,685 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/1d5296ef97744a9791ee8bfd5ca93dc8 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8 2024-12-09T14:29:16,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/3ddc170903104dada354117d7dc91164, entries=18, sequenceid=68, filesize=23.8 K 2024-12-09T14:29:16,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=9.46 KB/9684 for 0862a744e65ec9e18a19a29534f5321e in 31ms, sequenceid=68, compaction requested=false 2024-12-09T14:29:16,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:16,687 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.3 K, sizeToCheck=16.0 K 2024-12-09T14:29:16,687 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:16,687 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052 because midkey is the same as first or last row 2024-12-09T14:29:16,690 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0862a744e65ec9e18a19a29534f5321e/info of 0862a744e65ec9e18a19a29534f5321e into 1d5296ef97744a9791ee8bfd5ca93dc8(size=43.9 K), total size for store is 67.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T14:29:16,690 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:16,690 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., storeName=0862a744e65ec9e18a19a29534f5321e/info, priority=13, startTime=1733754556654; duration=0sec 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8 because midkey is the same as first or last row 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8 because midkey is the same as first or last row 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8 because midkey is the same as first or last row 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:16,691 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0862a744e65ec9e18a19a29534f5321e:info 2024-12-09T14:29:17,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:17,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:18,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:18,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:18,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0862a744e65ec9e18a19a29534f5321e 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-09T14:29:18,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:18,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/00ab5d69e79140938333aa5f3202ab69 is 1080, key is row0056/info:/1733754556657/Put/seqid=0 2024-12-09T14:29:18,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741842_1018 (size=15740) 2024-12-09T14:29:18,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741842_1018 (size=15740) 2024-12-09T14:29:18,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/00ab5d69e79140938333aa5f3202ab69 2024-12-09T14:29:18,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/00ab5d69e79140938333aa5f3202ab69 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/00ab5d69e79140938333aa5f3202ab69 2024-12-09T14:29:18,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/00ab5d69e79140938333aa5f3202ab69, entries=10, sequenceid=82, filesize=15.4 K 2024-12-09T14:29:18,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=19.96 KB/20444 for 0862a744e65ec9e18a19a29534f5321e in 50ms, sequenceid=82, compaction requested=true 2024-12-09T14:29:18,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:18,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:18,732 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-09T14:29:18,732 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:18,732 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8 because midkey is the same as first or last row 2024-12-09T14:29:18,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0862a744e65ec9e18a19a29534f5321e:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:29:18,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:18,733 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:29:18,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0862a744e65ec9e18a19a29534f5321e 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-09T14:29:18,734 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:29:18,734 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): 0862a744e65ec9e18a19a29534f5321e/info is initiating minor compaction (all files) 2024-12-09T14:29:18,734 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0862a744e65ec9e18a19a29534f5321e/info in TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:29:18,735 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/3ddc170903104dada354117d7dc91164, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/00ab5d69e79140938333aa5f3202ab69] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp, totalSize=83.1 K 2024-12-09T14:29:18,735 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1d5296ef97744a9791ee8bfd5ca93dc8, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733754544479 2024-12-09T14:29:18,736 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3ddc170903104dada354117d7dc91164, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1733754556622 2024-12-09T14:29:18,736 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00ab5d69e79140938333aa5f3202ab69, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733754556657 2024-12-09T14:29:18,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/54ab0e21f41e4aaaa8ec4960130061ba is 1080, key is row0066/info:/1733754558683/Put/seqid=0 2024-12-09T14:29:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741843_1019 (size=26530) 2024-12-09T14:29:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to 
blk_1073741843_1019 (size=26530) 2024-12-09T14:29:18,755 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0862a744e65ec9e18a19a29534f5321e#info#compaction#62 average throughput is 16.67 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:18,756 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/1ccd25963e1043d0a67318bd9df69a53 is 1080, key is row0001/info:/1733754544479/Put/seqid=0 2024-12-09T14:29:18,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T14:29:18,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41394 deadline: 1733754568758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:18,760 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:18,760 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:18,760 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 because the exception is null or not the one we care about 2024-12-09T14:29:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741844_1020 (size=75378) 2024-12-09T14:29:18,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741844_1020 (size=75378) 2024-12-09T14:29:18,773 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/1ccd25963e1043d0a67318bd9df69a53 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53 2024-12-09T14:29:18,781 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0862a744e65ec9e18a19a29534f5321e/info of 0862a744e65ec9e18a19a29534f5321e into 1ccd25963e1043d0a67318bd9df69a53(size=73.6 K), total size for store is 73.6 
K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:29:18,781 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:18,781 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., storeName=0862a744e65ec9e18a19a29534f5321e/info, priority=13, startTime=1733754558733; duration=0sec 2024-12-09T14:29:18,781 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-09T14:29:18,781 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:18,781 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-09T14:29:18,781 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:18,781 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-09T14:29:18,781 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T14:29:18,783 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:18,783 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:18,783 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0862a744e65ec9e18a19a29534f5321e:info 2024-12-09T14:29:18,784 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46445 {}] assignment.AssignmentManager(1363): Split request from f4e784dc7cb5,34999,1733754533657, parent={ENCODED => 0862a744e65ec9e18a19a29534f5321e, NAME => 'TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-09T14:29:18,789 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46445 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:18,793 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46445 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0862a744e65ec9e18a19a29534f5321e, daughterA=19fa93135a4999e0f57da51576741184, daughterB=bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:18,794 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure 
table=TestLogRolling-testLogRolling, parent=0862a744e65ec9e18a19a29534f5321e, daughterA=19fa93135a4999e0f57da51576741184, daughterB=bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:18,795 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0862a744e65ec9e18a19a29534f5321e, daughterA=19fa93135a4999e0f57da51576741184, daughterB=bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:18,795 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0862a744e65ec9e18a19a29534f5321e, daughterA=19fa93135a4999e0f57da51576741184, daughterB=bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:18,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, UNASSIGN}] 2024-12-09T14:29:18,804 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, UNASSIGN 2024-12-09T14:29:18,805 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0862a744e65ec9e18a19a29534f5321e, regionState=CLOSING, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:18,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, UNASSIGN because future has completed 2024-12-09T14:29:18,808 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T14:29:18,808 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657}] 2024-12-09T14:29:18,966 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:18,966 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T14:29:18,967 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 0862a744e65ec9e18a19a29534f5321e, disabling compactions & flushes 2024-12-09T14:29:18,967 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1993): waiting for 0 compactions & cache flush to complete for region TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 
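The split request above follows directly from the size check the split policy logged a few entries earlier: with only one region of this table on the server (regionsWithCommonTable=1) and an initial size of 16384 bytes, the effective threshold is 16.0 K, which the 73.6 K info store exceeds, so CompactSplit asks the master to split at row0062. The snippet below is a simplified, stand-alone sketch of that cubic-growth check, not the actual IncreasingToUpperBoundRegionSplitPolicy code; the constants are read off this log, and the desiredMaxFileSize value is borrowed from a daughter's "Opened" line further down, where it only matters as an upper cap.

// Simplified sketch of the "should split" decision seen in the log above; illustration only.
public class SplitSizeCheckSketch {

    // Threshold grows with the cube of the region count and is capped at desiredMaxFileSize.
    static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
        long grown = initialSize * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, grown);
    }

    static boolean shouldSplit(long sumStoreSize, long sizeToCheck) {
        return sumStoreSize > sizeToCheck;
    }

    public static void main(String[] args) {
        long initialSize = 16384;                        // initialSize=16384 (logged when the regions open)
        long desiredMaxFileSize = 726133;                // assumed cap, borrowed from a daughter's "Opened" line
        int regionsWithCommonTable = 1;                  // regionsWithCommonTable=1
        long threshold = sizeToCheck(initialSize, desiredMaxFileSize, regionsWithCommonTable);
        long sumSize = (long) (73.6 * 1024);             // sumSize=73.6 K after the compaction above
        System.out.println("sizeToCheck=" + threshold);  // 16384, i.e. the logged 16.0 K
        System.out.println("shouldSplit=" + shouldSplit(sumSize, threshold)); // true
    }
}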
2024-12-09T14:29:19,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/54ab0e21f41e4aaaa8ec4960130061ba 2024-12-09T14:29:19,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/54ab0e21f41e4aaaa8ec4960130061ba as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/54ab0e21f41e4aaaa8ec4960130061ba 2024-12-09T14:29:19,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/54ab0e21f41e4aaaa8ec4960130061ba, entries=20, sequenceid=105, filesize=25.9 K 2024-12-09T14:29:19,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 0862a744e65ec9e18a19a29534f5321e in 429ms, sequenceid=105, compaction requested=false 2024-12-09T14:29:19,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0862a744e65ec9e18a19a29534f5321e: 2024-12-09T14:29:19,162 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:29:19,162 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:29:19,162 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. after waiting 0 ms 2024-12-09T14:29:19,162 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 
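This flush is what relieves the pressure behind the RegionTooBusyException ("Over memstore limit=32.0 K") reported to the client earlier in this section; the async client used by the test retries the put itself and, as logged, leaves its region location cache untouched because the exception does not indicate a moved region. Purely as an illustration of the same idea from an application's point of view (this is not the client's internal retry logic; the row, qualifier and value below are hypothetical), a caller could back off and retry explicitly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: retry a put with exponential backoff while the region is over its memstore limit.
public class PutWithBackoffSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            Put put = new Put(Bytes.toBytes("row0001"));      // hypothetical row
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
            long backoffMs = 100;
            for (int attempt = 1;; attempt++) {
                try {
                    table.put(put);
                    break;                                     // write accepted
                } catch (RegionTooBusyException e) {           // e.g. "Over memstore limit=32.0 K"
                    if (attempt >= 5) throw e;                 // give up after a few attempts
                    Thread.sleep(backoffMs);                   // give the flusher time to drain the memstore
                    backoffMs *= 2;
                }
            }
        }
    }
}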
2024-12-09T14:29:19,163 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 0862a744e65ec9e18a19a29534f5321e 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-09T14:29:19,166 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/6978a453017d4e588b8894f35f9632fa is 1080, key is row0086/info:/1733754558734/Put/seqid=0 2024-12-09T14:29:19,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741845_1021 (size=14663) 2024-12-09T14:29:19,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741845_1021 (size=14663) 2024-12-09T14:29:19,173 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/6978a453017d4e588b8894f35f9632fa 2024-12-09T14:29:19,178 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/.tmp/info/6978a453017d4e588b8894f35f9632fa as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/6978a453017d4e588b8894f35f9632fa 2024-12-09T14:29:19,183 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/6978a453017d4e588b8894f35f9632fa, entries=9, sequenceid=118, filesize=14.3 K 2024-12-09T14:29:19,185 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 0862a744e65ec9e18a19a29534f5321e in 22ms, sequenceid=118, compaction requested=true 2024-12-09T14:29:19,186 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/85483f5423534826b82c4e6365f65431, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8, 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/01ed64984bf843a28c34b4f5e84366f2, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/3ddc170903104dada354117d7dc91164, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/00ab5d69e79140938333aa5f3202ab69] to archive 2024-12-09T14:29:19,187 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T14:29:19,188 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/85483f5423534826b82c4e6365f65431 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/85483f5423534826b82c4e6365f65431 2024-12-09T14:29:19,189 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/bce17a6b75ed4bcfb8fce2d270057052 2024-12-09T14:29:19,190 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1d5296ef97744a9791ee8bfd5ca93dc8 2024-12-09T14:29:19,192 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/01ed64984bf843a28c34b4f5e84366f2 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/01ed64984bf843a28c34b4f5e84366f2 2024-12-09T14:29:19,193 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/3ddc170903104dada354117d7dc91164 to 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/3ddc170903104dada354117d7dc91164 2024-12-09T14:29:19,193 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/00ab5d69e79140938333aa5f3202ab69 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/00ab5d69e79140938333aa5f3202ab69 2024-12-09T14:29:19,200 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1 2024-12-09T14:29:19,200 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 2024-12-09T14:29:19,200 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 0862a744e65ec9e18a19a29534f5321e: Waiting for close lock at 1733754558967Running coprocessor pre-close hooks at 1733754558967Disabling compacts and flushes for region at 1733754558967Disabling writes for close at 1733754559162 (+195 ms)Obtaining lock to block concurrent updates at 1733754559163 (+1 ms)Preparing flush snapshotting stores in 0862a744e65ec9e18a19a29534f5321e at 1733754559163Finished memstore snapshotting TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., syncing WAL and waiting on mvcc, flushsize=dataSize=9684, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1733754559163Flushing stores of TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 
at 1733754559163Flushing 0862a744e65ec9e18a19a29534f5321e/info: creating writer at 1733754559163Flushing 0862a744e65ec9e18a19a29534f5321e/info: appending metadata at 1733754559166 (+3 ms)Flushing 0862a744e65ec9e18a19a29534f5321e/info: closing flushed file at 1733754559166Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e76a703: reopening flushed file at 1733754559178 (+12 ms)Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 0862a744e65ec9e18a19a29534f5321e in 22ms, sequenceid=118, compaction requested=true at 1733754559185 (+7 ms)Writing region close event to WAL at 1733754559196 (+11 ms)Running coprocessor post-close hooks at 1733754559200 (+4 ms)Closed at 1733754559200 2024-12-09T14:29:19,202 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,203 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0862a744e65ec9e18a19a29534f5321e, regionState=CLOSED 2024-12-09T14:29:19,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 because future has completed 2024-12-09T14:29:19,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-09T14:29:19,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 0862a744e65ec9e18a19a29534f5321e, server=f4e784dc7cb5,34999,1733754533657 in 398 msec 2024-12-09T14:29:19,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T14:29:19,211 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0862a744e65ec9e18a19a29534f5321e, UNASSIGN in 406 msec 2024-12-09T14:29:19,220 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:29:19,223 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=0862a744e65ec9e18a19a29534f5321e, threads=3 2024-12-09T14:29:19,225 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53 for region: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,225 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/54ab0e21f41e4aaaa8ec4960130061ba for region: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,225 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/6978a453017d4e588b8894f35f9632fa for region: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,237 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/54ab0e21f41e4aaaa8ec4960130061ba, top=true 2024-12-09T14:29:19,241 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/6978a453017d4e588b8894f35f9632fa, top=true 2024-12-09T14:29:19,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741846_1022 (size=27) 2024-12-09T14:29:19,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741846_1022 (size=27) 2024-12-09T14:29:19,246 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba for child: bb7a8687dd4764718ce346f3cc62241b, parent: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,247 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa for child: bb7a8687dd4764718ce346f3cc62241b, parent: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,247 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/54ab0e21f41e4aaaa8ec4960130061ba for region: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,247 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/6978a453017d4e588b8894f35f9632fa for region: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741847_1023 (size=27) 2024-12-09T14:29:19,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741847_1023 (size=27) 2024-12-09T14:29:19,269 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53 for region: 0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:29:19,277 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 0862a744e65ec9e18a19a29534f5321e Daughter A: [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e] storefiles, Daughter B: [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa] storefiles. 2024-12-09T14:29:19,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741848_1024 (size=71) 2024-12-09T14:29:19,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741848_1024 (size=71) 2024-12-09T14:29:19,300 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:29:19,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741849_1025 (size=71) 2024-12-09T14:29:19,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741849_1025 (size=71) 2024-12-09T14:29:19,317 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:29:19,329 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-09T14:29:19,331 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-12-09T14:29:19,334 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733754559334"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733754559334"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733754559334"}]},"ts":"1733754559334"} 2024-12-09T14:29:19,335 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733754559334"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733754559334"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733754559334"}]},"ts":"1733754559334"} 2024-12-09T14:29:19,335 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733754559334"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733754559334"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733754559334"}]},"ts":"1733754559334"} 2024-12-09T14:29:19,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=19fa93135a4999e0f57da51576741184, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb7a8687dd4764718ce346f3cc62241b, ASSIGN}] 2024-12-09T14:29:19,356 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=19fa93135a4999e0f57da51576741184, ASSIGN 2024-12-09T14:29:19,356 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb7a8687dd4764718ce346f3cc62241b, ASSIGN 2024-12-09T14:29:19,357 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb7a8687dd4764718ce346f3cc62241b, ASSIGN; state=SPLITTING_NEW, location=f4e784dc7cb5,34999,1733754533657; forceNewPlan=false, retain=false 2024-12-09T14:29:19,357 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=19fa93135a4999e0f57da51576741184, ASSIGN; state=SPLITTING_NEW, location=f4e784dc7cb5,34999,1733754533657; forceNewPlan=false, retain=false 2024-12-09T14:29:19,508 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=bb7a8687dd4764718ce346f3cc62241b, regionState=OPENING, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:19,508 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta 
row=19fa93135a4999e0f57da51576741184, regionState=OPENING, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:19,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb7a8687dd4764718ce346f3cc62241b, ASSIGN because future has completed 2024-12-09T14:29:19,511 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb7a8687dd4764718ce346f3cc62241b, server=f4e784dc7cb5,34999,1733754533657}] 2024-12-09T14:29:19,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=19fa93135a4999e0f57da51576741184, ASSIGN because future has completed 2024-12-09T14:29:19,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19fa93135a4999e0f57da51576741184, server=f4e784dc7cb5,34999,1733754533657}] 2024-12-09T14:29:19,668 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:19,668 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => bb7a8687dd4764718ce346f3cc62241b, NAME => 'TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-09T14:29:19,668 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,668 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:29:19,668 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,669 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,670 INFO [StoreOpener-bb7a8687dd4764718ce346f3cc62241b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,671 INFO [StoreOpener-bb7a8687dd4764718ce346f3cc62241b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb7a8687dd4764718ce346f3cc62241b columnFamilyName info 2024-12-09T14:29:19,671 DEBUG [StoreOpener-bb7a8687dd4764718ce346f3cc62241b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:29:19,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:19,684 DEBUG [StoreOpener-bb7a8687dd4764718ce346f3cc62241b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e->hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53-top 2024-12-09T14:29:19,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:19,689 DEBUG [StoreOpener-bb7a8687dd4764718ce346f3cc62241b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba 2024-12-09T14:29:19,695 DEBUG [StoreOpener-bb7a8687dd4764718ce346f3cc62241b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa 2024-12-09T14:29:19,696 INFO [StoreOpener-bb7a8687dd4764718ce346f3cc62241b-1 {}] regionserver.HStore(327): Store=bb7a8687dd4764718ce346f3cc62241b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:29:19,696 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,697 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,698 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,698 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,698 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,700 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,700 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened bb7a8687dd4764718ce346f3cc62241b; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726133, jitterRate=-0.07667502760887146}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T14:29:19,700 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:19,701 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for bb7a8687dd4764718ce346f3cc62241b: Running coprocessor pre-open hook at 1733754559669Writing region info on filesystem at 1733754559669Initializing all the Stores at 1733754559670 (+1 ms)Instantiating store 
for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754559670Cleaning up temporary data from old regions at 1733754559698 (+28 ms)Running coprocessor post-open hooks at 1733754559700 (+2 ms)Region opened successfully at 1733754559701 (+1 ms) 2024-12-09T14:29:19,702 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., pid=12, masterSystemTime=1733754559663 2024-12-09T14:29:19,702 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store bb7a8687dd4764718ce346f3cc62241b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:29:19,702 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:19,702 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:29:19,703 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:19,703 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): bb7a8687dd4764718ce346f3cc62241b/info is initiating minor compaction (all files) 2024-12-09T14:29:19,703 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb7a8687dd4764718ce346f3cc62241b/info in TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 
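The open journal above echoes the descriptor of the single 'info' family: one version kept, ROW bloom filter, 64 KB blocks, no compression or data block encoding. As a rough sketch of how the same family could be declared through the public client API (the table name is taken from the log; the settings follow the logged descriptor and everything else is left at defaults):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Declarative equivalent of the 'info' family settings echoed in the region open journal above.
public class InfoFamilyDescriptorSketch {
    public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                   // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)   // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)             // BLOCKSIZE => '65536 B (64KB)'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(info)
            .build();
    }
}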
2024-12-09T14:29:19,703 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e->hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53-top, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp, totalSize=113.8 K 2024-12-09T14:29:19,704 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733754544479 2024-12-09T14:29:19,704 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:19,704 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:19,704 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1733754558683 2024-12-09T14:29:19,704 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 
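The daughter's first compaction takes all three inputs listed above: the top half-reference to the parent's 73.6 K compacted file plus the two HFileLinks to the parent's later flushes (25.9 K at seqid=105 and 14.3 K at seqid=118), which is exactly where the logged totalSize=113.8 K comes from. A trivial check of that arithmetic:

// Quick check that the three compaction inputs listed above add up to the logged totalSize.
public class CompactionInputSizeCheck {
    public static void main(String[] args) {
        double topReferenceKb   = 73.6; // 1ccd...a53.0862... (top half of the parent's compacted file)
        double linkedFlush105Kb = 25.9; // link to the parent flush at seqid=105
        double linkedFlush118Kb = 14.3; // link to the parent flush at seqid=118
        System.out.printf("totalSize=%.1f K%n",
            topReferenceKb + linkedFlush105Kb + linkedFlush118Kb); // 113.8 K
    }
}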
2024-12-09T14:29:19,704 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 19fa93135a4999e0f57da51576741184, NAME => 'TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-09T14:29:19,704 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733754558734 2024-12-09T14:29:19,704 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,704 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:29:19,705 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,705 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,705 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=bb7a8687dd4764718ce346f3cc62241b, regionState=OPEN, openSeqNum=122, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:19,706 INFO [StoreOpener-19fa93135a4999e0f57da51576741184-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,706 INFO [StoreOpener-19fa93135a4999e0f57da51576741184-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 19fa93135a4999e0f57da51576741184 columnFamilyName info 2024-12-09T14:29:19,706 DEBUG [StoreOpener-19fa93135a4999e0f57da51576741184-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:29:19,707 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-09T14:29:19,707 DEBUG [MemStoreFlusher.0 {}] 
regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-09T14:29:19,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-09T14:29:19,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb7a8687dd4764718ce346f3cc62241b, server=f4e784dc7cb5,34999,1733754533657 because future has completed 2024-12-09T14:29:19,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-09T14:29:19,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure bb7a8687dd4764718ce346f3cc62241b, server=f4e784dc7cb5,34999,1733754533657 in 197 msec 2024-12-09T14:29:19,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bb7a8687dd4764718ce346f3cc62241b, ASSIGN in 356 msec 2024-12-09T14:29:19,716 DEBUG [StoreOpener-19fa93135a4999e0f57da51576741184-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e->hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53-bottom 2024-12-09T14:29:19,717 INFO [StoreOpener-19fa93135a4999e0f57da51576741184-1 {}] regionserver.HStore(327): Store=19fa93135a4999e0f57da51576741184/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:29:19,717 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,718 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,718 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,719 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,719 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,720 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 19fa93135a4999e0f57da51576741184 
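With bb7a8687dd4764718ce346f3cc62241b already marked OPEN in hbase:meta and 19fa93135a4999e0f57da51576741184 opening here, the table now consists of two daughters split at row0062 (the start/end keys shown when each region is opened). A client can observe that layout through the region locator; a minimal sketch, with the connection setup assumed since it is not part of this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// List the post-split region layout of the test table (two daughters split at row0062).
public class ListRegionsAfterSplitSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            for (HRegionLocation loc : locator.getAllRegionLocations()) {
                System.out.println(loc.getRegion().getEncodedName()
                    + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
                    + ", " + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ") on "
                    + loc.getServerName());
            }
        }
    }
}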
2024-12-09T14:29:19,721 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 19fa93135a4999e0f57da51576741184; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831359, jitterRate=0.0571286678314209}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T14:29:19,721 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 19fa93135a4999e0f57da51576741184 2024-12-09T14:29:19,721 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 19fa93135a4999e0f57da51576741184: Running coprocessor pre-open hook at 1733754559705Writing region info on filesystem at 1733754559705Initializing all the Stores at 1733754559705Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754559705Cleaning up temporary data from old regions at 1733754559719 (+14 ms)Running coprocessor post-open hooks at 1733754559721 (+2 ms)Region opened successfully at 1733754559721 2024-12-09T14:29:19,722 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184., pid=13, masterSystemTime=1733754559663 2024-12-09T14:29:19,722 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 19fa93135a4999e0f57da51576741184:info, priority=-2147483648, current under compaction store size is 2 2024-12-09T14:29:19,722 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:19,722 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-09T14:29:19,723 INFO [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 2024-12-09T14:29:19,723 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.HStore(1541): 19fa93135a4999e0f57da51576741184/info is initiating minor compaction (all files) 2024-12-09T14:29:19,723 INFO [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 19fa93135a4999e0f57da51576741184/info in TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 
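The "Opened ..." record above prints the effective split policy, including ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831359, jitterRate=0.0571286678314209}. Assuming the policy applies its jitter as base + (long)(base * jitterRate), the pre-jitter size configured for this test can be recovered from the two printed values; this is a back-of-the-envelope sketch, not the policy's own code:

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        // Both values are copied from the policy toString() in the log record above.
        long desiredMaxFileSize = 831_359L;      // post-jitter value printed by the policy
        double jitterRate = 0.0571286678314209;  // printed jitter rate

        // Assumed relationship: desired = base + (long)(base * jitterRate).
        long base = Math.round(desiredMaxFileSize / (1.0 + jitterRate));
        System.out.println("approximate configured max file size = " + base + " bytes");
        // Prints roughly 786432, i.e. 768 KB, a plausible test-sized hbase.hregion.max.filesize.
      }
    }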
2024-12-09T14:29:19,723 INFO [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e->hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53-bottom] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/.tmp, totalSize=73.6 K 2024-12-09T14:29:19,724 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] compactions.Compactor(225): Compacting 1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733754544479 2024-12-09T14:29:19,724 DEBUG [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 2024-12-09T14:29:19,724 INFO [RS_OPEN_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 2024-12-09T14:29:19,725 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=19fa93135a4999e0f57da51576741184, regionState=OPEN, openSeqNum=122, regionLocation=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:19,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/info/ee4d91e287174d728a869d6f6b646285 is 193, key is TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b./info:regioninfo/1733754559705/Put/seqid=0 2024-12-09T14:29:19,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19fa93135a4999e0f57da51576741184, server=f4e784dc7cb5,34999,1733754533657 because future has completed 2024-12-09T14:29:19,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-09T14:29:19,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 19fa93135a4999e0f57da51576741184, server=f4e784dc7cb5,34999,1733754533657 in 217 msec 2024-12-09T14:29:19,733 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb7a8687dd4764718ce346f3cc62241b#info#compaction#65 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:19,734 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/8bc5745262464e688ca2f98a5f87ace8 is 1080, key is row0062/info:/1733754556674/Put/seqid=0 2024-12-09T14:29:19,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-09T14:29:19,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=19fa93135a4999e0f57da51576741184, ASSIGN in 377 msec 2024-12-09T14:29:19,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0862a744e65ec9e18a19a29534f5321e, daughterA=19fa93135a4999e0f57da51576741184, daughterB=bb7a8687dd4764718ce346f3cc62241b in 945 msec 2024-12-09T14:29:19,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741850_1026 (size=9882) 2024-12-09T14:29:19,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741850_1026 (size=9882) 2024-12-09T14:29:19,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/info/ee4d91e287174d728a869d6f6b646285 2024-12-09T14:29:19,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741851_1027 (size=40830) 2024-12-09T14:29:19,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741851_1027 (size=40830) 2024-12-09T14:29:19,745 INFO [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 19fa93135a4999e0f57da51576741184#info#compaction#66 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:19,745 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/.tmp/info/cd9cb342b6964e5b818b2931bf3b1d19 is 1080, key is row0001/info:/1733754544479/Put/seqid=0 2024-12-09T14:29:19,746 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/8bc5745262464e688ca2f98a5f87ace8 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8bc5745262464e688ca2f98a5f87ace8 2024-12-09T14:29:19,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741852_1028 (size=70862) 2024-12-09T14:29:19,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741852_1028 (size=70862) 2024-12-09T14:29:19,752 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb7a8687dd4764718ce346f3cc62241b/info of bb7a8687dd4764718ce346f3cc62241b into 8bc5745262464e688ca2f98a5f87ace8(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:29:19,752 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:19,753 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., storeName=bb7a8687dd4764718ce346f3cc62241b/info, priority=13, startTime=1733754559702; duration=0sec 2024-12-09T14:29:19,753 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:19,753 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb7a8687dd4764718ce346f3cc62241b:info 2024-12-09T14:29:19,755 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/.tmp/info/cd9cb342b6964e5b818b2931bf3b1d19 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info/cd9cb342b6964e5b818b2931bf3b1d19 2024-12-09T14:29:19,760 INFO [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 19fa93135a4999e0f57da51576741184/info of 19fa93135a4999e0f57da51576741184 into cd9cb342b6964e5b818b2931bf3b1d19(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
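Both compactions above end by committing a single hfile per daughter store (8bc5745262464e688ca2f98a5f87ace8 at 39.9 K and cd9cb342b6964e5b818b2931bf3b1d19 at 69.2 K). That outcome can be verified directly against HDFS by listing the column-family directory. A small sketch using the plain Hadoop FileSystem API; the NameNode address and paths are copied from the log lines above and are specific to this test run:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46419"), conf)) {
          Path cfDir = new Path("/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/"
              + "data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info");
          // After the minor compaction of the bottom reference file, a single hfile
          // (cd9cb342b6964e5b818b2931bf3b1d19, ~69.2 K) should remain in this directory.
          for (FileStatus st : fs.listStatus(cfDir)) {
            System.out.printf("%s\t%d bytes%n", st.getPath().getName(), st.getLen());
          }
        }
      }
    }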
2024-12-09T14:29:19,760 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 19fa93135a4999e0f57da51576741184: 2024-12-09T14:29:19,760 INFO [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184., storeName=19fa93135a4999e0f57da51576741184/info, priority=15, startTime=1733754559722; duration=0sec 2024-12-09T14:29:19,760 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:19,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/ns/f526c90edc7a472d934ac8a75e9dd17a is 43, key is default/ns:d/1733754534314/Put/seqid=0 2024-12-09T14:29:19,760 DEBUG [RS:0;f4e784dc7cb5:34999-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 19fa93135a4999e0f57da51576741184:info 2024-12-09T14:29:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741853_1029 (size=5153) 2024-12-09T14:29:19,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741853_1029 (size=5153) 2024-12-09T14:29:19,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/ns/f526c90edc7a472d934ac8a75e9dd17a 2024-12-09T14:29:19,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/table/3bcc9a2fbae34763b5e5fb55d97a2d69 is 65, key is TestLogRolling-testLogRolling/table:state/1733754534754/Put/seqid=0 2024-12-09T14:29:19,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741854_1030 (size=5340) 2024-12-09T14:29:19,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741854_1030 (size=5340) 2024-12-09T14:29:19,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/table/3bcc9a2fbae34763b5e5fb55d97a2d69 2024-12-09T14:29:19,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/info/ee4d91e287174d728a869d6f6b646285 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/info/ee4d91e287174d728a869d6f6b646285 2024-12-09T14:29:19,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/info/ee4d91e287174d728a869d6f6b646285, 
entries=30, sequenceid=17, filesize=9.7 K 2024-12-09T14:29:19,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/ns/f526c90edc7a472d934ac8a75e9dd17a as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/ns/f526c90edc7a472d934ac8a75e9dd17a 2024-12-09T14:29:19,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/ns/f526c90edc7a472d934ac8a75e9dd17a, entries=2, sequenceid=17, filesize=5.0 K 2024-12-09T14:29:19,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/table/3bcc9a2fbae34763b5e5fb55d97a2d69 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/table/3bcc9a2fbae34763b5e5fb55d97a2d69 2024-12-09T14:29:19,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/table/3bcc9a2fbae34763b5e5fb55d97a2d69, entries=2, sequenceid=17, filesize=5.2 K 2024-12-09T14:29:19,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 105ms, sequenceid=17, compaction requested=false 2024-12-09T14:29:19,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T14:29:20,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:20,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:21,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:21,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:22,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:22,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:23,584 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T14:29:23,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:23,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:24,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:24,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:24,741 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:29:24,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:24,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:29:25,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:25,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:26,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:26,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:27,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:27,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:28,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:28,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41394 deadline: 1733754578807, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. 
is not online on f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:28,809 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. is not online on f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:28,809 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e. is not online on f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:28,809 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733754534393.0862a744e65ec9e18a19a29534f5321e., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=2 from cache 2024-12-09T14:29:29,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:29,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:30,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:30,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:31,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:31,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:32,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:32,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:33,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:33,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:34,329 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T14:29:34,329 INFO [master/f4e784dc7cb5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T14:29:34,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:34,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:35,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:35,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:36,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:36,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:37,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:37,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:38,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:38,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:39,284 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-12-09T14:29:39,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:39,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:40,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:40,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:41,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:41,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:42,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:42,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:43,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:43,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:44,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:44,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:45,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:45,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:46,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:46,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:47,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:47,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:48,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:48,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:48,924 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0095', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=122] 2024-12-09T14:29:49,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:49,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:50,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T14:29:50,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
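The two warnings repeating above, roughly once per second and one per WAL file, all come from the same loop: the Close-WAL-Writer task asks RecoverLeaseFSUtils whether the WAL file is closed, the check is made reflectively because isFileClosed(Path) is only declared on DistributedFileSystem, and every probe dies with an InvocationTargetException whose cause is "java.io.IOException: Filesystem closed", meaning the DFSClient behind that FileSystem handle has already been shut down, so retrying cannot succeed. Below is a minimal sketch of that probe-and-retry pattern; the class and method names (IsFileClosedProbe, waitUntilClosed) are invented for illustration, and only the reflective isFileClosed call mirrors what the stack traces show:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Sketch of the probe-and-retry pattern visible in the log above.
 * Names here are illustrative only; this is not HBase's RecoverLeaseFSUtils code.
 */
public final class IsFileClosedProbe {

  /** Returns true once the NameNode reports the file closed, false on timeout. */
  public static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    final Method isFileClosed;
    try {
      // isFileClosed(Path) only exists on DistributedFileSystem, hence reflection.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // local or other non-HDFS filesystem: nothing to probe
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // With "java.io.IOException: Filesystem closed" as the cause, the DFSClient
        // behind this handle is gone, so each retry can only fail the same way.
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      }
      Thread.sleep(1000L); // matches the roughly one-second cadence in the log
    }
    return false;
  }
}
```

Read this way, the long run of identical stack traces is one stuck close operation per WAL cycling through its retries, not a series of distinct failures.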
2024-12-09T14:29:50,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b
2024-12-09T14:29:50,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T14:29:50,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/50de1dfdeca44584bb06ab7e58b5673a is 1080, key is row0095/info:/1733754588925/Put/seqid=0
2024-12-09T14:29:50,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741855_1031 (size=12513)
2024-12-09T14:29:50,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741855_1031 (size=12513)
2024-12-09T14:29:50,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/50de1dfdeca44584bb06ab7e58b5673a
2024-12-09T14:29:50,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/50de1dfdeca44584bb06ab7e58b5673a as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/50de1dfdeca44584bb06ab7e58b5673a
2024-12-09T14:29:50,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/50de1dfdeca44584bb06ab7e58b5673a, entries=7, sequenceid=132, filesize=12.2 K
2024-12-09T14:29:50,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for bb7a8687dd4764718ce346f3cc62241b in 32ms, sequenceid=132, compaction requested=false
2024-12-09T14:29:50,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b:
2024-12-09T14:29:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b
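The flush entries above follow the sequence visible in the log: the memstore snapshot is written as a new HFile under the region's .tmp directory, the file is committed (renamed) into the info store, and the region reports the flushed dataSize/heapSize and the elapsed time; a second flush is requested right away, presumably because roughly 15.76 KB of new edits (the currentSize figure) accumulated while the first flush was running. The sketch below is purely illustrative, with invented names (MemStoreFlushSketch, TmpFileWriter are not HBase classes), and only maps the Flushing / Flushed / Committing / Finished steps onto code:

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.ConcurrentSkipListMap;

/** Illustrative sketch of the flush cycle reported above; not the HBase API. */
final class MemStoreFlushSketch {

  /** Stand-in for the store's HFile writer working under .tmp. */
  interface TmpFileWriter {
    void write(ConcurrentSkipListMap<byte[], byte[]> snapshot) throws IOException; // "Flushed memstore ... to=.../.tmp/..."
    void commit() throws IOException;                                              // "Committing .../.tmp/... as .../info/..."
  }

  private final ConcurrentSkipListMap<byte[], byte[]> active =
      new ConcurrentSkipListMap<>(Arrays::compare);
  private long dataSize; // payload bytes, i.e. the "dataSize=7.36 KB" figure

  void put(byte[] key, byte[] value) {
    active.put(key, value);
    dataSize += key.length + value.length;
  }

  /** Mirrors the Flushing -> Flushed -> Committing -> Finished sequence in the log. */
  long flush(TmpFileWriter writer) throws IOException {
    ConcurrentSkipListMap<byte[], byte[]> snapshot = active.clone();
    active.clear();          // sketch only: the real flusher swaps in a fresh segment atomically
    long flushedBytes = dataSize;
    dataSize = 0;
    writer.write(snapshot);  // new HFile appears under .tmp/info/
    writer.commit();         // rename into info/, after which the store file is "Added"
    return flushedBytes;     // reported as "Finished flush of dataSize ~..."
  }
}
```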
Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-09T14:29:50,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/a411ca588fad4fb8ab1e274e658b6136 is 1080, key is row0102/info:/1733754590939/Put/seqid=0 2024-12-09T14:29:50,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741856_1032 (size=23316) 2024-12-09T14:29:50,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741856_1032 (size=23316) 2024-12-09T14:29:50,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/a411ca588fad4fb8ab1e274e658b6136 2024-12-09T14:29:50,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/a411ca588fad4fb8ab1e274e658b6136 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a411ca588fad4fb8ab1e274e658b6136 2024-12-09T14:29:50,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a411ca588fad4fb8ab1e274e658b6136, entries=17, sequenceid=152, filesize=22.8 K 2024-12-09T14:29:50,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for bb7a8687dd4764718ce346f3cc62241b in 23ms, sequenceid=152, compaction requested=true 2024-12-09T14:29:50,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:50,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb7a8687dd4764718ce346f3cc62241b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:29:50,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:50,994 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:29:50,995 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:29:50,995 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): bb7a8687dd4764718ce346f3cc62241b/info is initiating minor compaction (all files) 2024-12-09T14:29:50,996 INFO 
[RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb7a8687dd4764718ce346f3cc62241b/info in TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:50,996 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8bc5745262464e688ca2f98a5f87ace8, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/50de1dfdeca44584bb06ab7e58b5673a, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a411ca588fad4fb8ab1e274e658b6136] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp, totalSize=74.9 K 2024-12-09T14:29:50,996 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8bc5745262464e688ca2f98a5f87ace8, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733754556674 2024-12-09T14:29:50,997 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 50de1dfdeca44584bb06ab7e58b5673a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733754588925 2024-12-09T14:29:50,997 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting a411ca588fad4fb8ab1e274e658b6136, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733754590939 2024-12-09T14:29:51,014 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb7a8687dd4764718ce346f3cc62241b#info#compaction#71 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:51,014 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/7d0e146bb4594902bea156908eeadf9c is 1080, key is row0062/info:/1733754556674/Put/seqid=0 2024-12-09T14:29:51,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741857_1033 (size=66869) 2024-12-09T14:29:51,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741857_1033 (size=66869) 2024-12-09T14:29:51,030 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/7d0e146bb4594902bea156908eeadf9c as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/7d0e146bb4594902bea156908eeadf9c 2024-12-09T14:29:51,037 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb7a8687dd4764718ce346f3cc62241b/info of bb7a8687dd4764718ce346f3cc62241b into 7d0e146bb4594902bea156908eeadf9c(size=65.3 K), total size for store is 65.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:29:51,037 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:51,037 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., storeName=bb7a8687dd4764718ce346f3cc62241b/info, priority=13, startTime=1733754590994; duration=0sec 2024-12-09T14:29:51,037 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:51,037 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb7a8687dd4764718ce346f3cc62241b:info 2024-12-09T14:29:51,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
[remaining frames and root cause identical to the 14:29:50,702 stack trace above: java.io.IOException: Filesystem closed ... 11 more]
2024-12-09T14:29:51,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null [stack trace identical to the 14:29:50,702 occurrence above; root cause: java.io.IOException: Filesystem closed ... 11 more]
2024-12-09T14:29:52,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null [stack trace identical to the 14:29:50,702 occurrence above; root cause: java.io.IOException: Filesystem closed ... 11 more]
2024-12-09T14:29:52,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:52,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:52,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T14:29:52,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/48ea9e5fff6d40ffbd055b54cc7ae9ac is 1080, key is row0119/info:/1733754590972/Put/seqid=0 2024-12-09T14:29:53,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741858_1034 (size=16828) 2024-12-09T14:29:53,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741858_1034 (size=16828) 2024-12-09T14:29:53,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/48ea9e5fff6d40ffbd055b54cc7ae9ac 2024-12-09T14:29:53,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/48ea9e5fff6d40ffbd055b54cc7ae9ac as 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/48ea9e5fff6d40ffbd055b54cc7ae9ac 2024-12-09T14:29:53,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/48ea9e5fff6d40ffbd055b54cc7ae9ac, entries=11, sequenceid=167, filesize=16.4 K 2024-12-09T14:29:53,018 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for bb7a8687dd4764718ce346f3cc62241b in 26ms, sequenceid=167, compaction requested=false 2024-12-09T14:29:53,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:53,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:53,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-09T14:29:53,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/8e3ad82119d740139e89411095223797 is 1080, key is row0130/info:/1733754592993/Put/seqid=0 2024-12-09T14:29:53,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741859_1035 (size=21156) 2024-12-09T14:29:53,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741859_1035 (size=21156) 2024-12-09T14:29:53,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/8e3ad82119d740139e89411095223797 2024-12-09T14:29:53,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/8e3ad82119d740139e89411095223797 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8e3ad82119d740139e89411095223797 2024-12-09T14:29:53,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8e3ad82119d740139e89411095223797, entries=15, sequenceid=185, filesize=20.7 K 2024-12-09T14:29:53,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for bb7a8687dd4764718ce346f3cc62241b in 22ms, sequenceid=185, compaction requested=true 2024-12-09T14:29:53,042 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:53,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb7a8687dd4764718ce346f3cc62241b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:29:53,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:53,042 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:29:53,043 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 104853 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:29:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:53,043 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): bb7a8687dd4764718ce346f3cc62241b/info is initiating minor compaction (all files) 2024-12-09T14:29:53,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-09T14:29:53,043 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb7a8687dd4764718ce346f3cc62241b/info in TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:53,043 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/7d0e146bb4594902bea156908eeadf9c, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/48ea9e5fff6d40ffbd055b54cc7ae9ac, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8e3ad82119d740139e89411095223797] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp, totalSize=102.4 K 2024-12-09T14:29:53,044 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d0e146bb4594902bea156908eeadf9c, keycount=57, bloomtype=ROW, size=65.3 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1733754556674 2024-12-09T14:29:53,044 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 48ea9e5fff6d40ffbd055b54cc7ae9ac, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733754590972 2024-12-09T14:29:53,045 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e3ad82119d740139e89411095223797, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733754592993 2024-12-09T14:29:53,048 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/e2f9e35bee914621aad13e7cbaf0b43f is 1080, key is row0145/info:/1733754593021/Put/seqid=0 2024-12-09T14:29:53,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741860_1036 (size=21156) 2024-12-09T14:29:53,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741860_1036 (size=21156) 2024-12-09T14:29:53,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/e2f9e35bee914621aad13e7cbaf0b43f 2024-12-09T14:29:53,056 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb7a8687dd4764718ce346f3cc62241b#info#compaction#75 average throughput is 85.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:53,057 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/531b4afd3d2a436da7f31ec8c7083352 is 1080, key is row0062/info:/1733754556674/Put/seqid=0 2024-12-09T14:29:53,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/e2f9e35bee914621aad13e7cbaf0b43f as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e2f9e35bee914621aad13e7cbaf0b43f 2024-12-09T14:29:53,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741861_1037 (size=95076) 2024-12-09T14:29:53,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741861_1037 (size=95076) 2024-12-09T14:29:53,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e2f9e35bee914621aad13e7cbaf0b43f, entries=15, sequenceid=203, filesize=20.7 K 2024-12-09T14:29:53,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for bb7a8687dd4764718ce346f3cc62241b in 26ms, sequenceid=203, compaction requested=false 2024-12-09T14:29:53,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:53,469 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/531b4afd3d2a436da7f31ec8c7083352 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/531b4afd3d2a436da7f31ec8c7083352 2024-12-09T14:29:53,476 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb7a8687dd4764718ce346f3cc62241b/info of bb7a8687dd4764718ce346f3cc62241b into 531b4afd3d2a436da7f31ec8c7083352(size=92.8 K), total size for store is 113.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:29:53,476 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:53,477 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., storeName=bb7a8687dd4764718ce346f3cc62241b/info, priority=13, startTime=1733754593042; duration=0sec 2024-12-09T14:29:53,477 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:53,477 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb7a8687dd4764718ce346f3cc62241b:info 2024-12-09T14:29:53,585 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T14:29:53,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed [frames identical to the 14:29:50,702 stack trace above ... 11 more]
2024-12-09T14:29:53,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null [stack trace identical to the 14:29:50,702 occurrence above; root cause: java.io.IOException: Filesystem closed ... 11 more]
2024-12-09T14:29:54,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null [stack trace identical to the 14:29:50,702 occurrence above; root cause: java.io.IOException: Filesystem closed ... 11 more]
2024-12-09T14:29:54,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null [main frames identical to the 14:29:50,702 occurrence above] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:55,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:55,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:29:55,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/c08bd61a779c4eb4b70e7d593d9ce242 is 1080, key is row0160/info:/1733754593044/Put/seqid=0 2024-12-09T14:29:55,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741862_1038 (size=12516) 2024-12-09T14:29:55,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741862_1038 (size=12516) 2024-12-09T14:29:55,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/c08bd61a779c4eb4b70e7d593d9ce242 2024-12-09T14:29:55,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/c08bd61a779c4eb4b70e7d593d9ce242 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/c08bd61a779c4eb4b70e7d593d9ce242 2024-12-09T14:29:55,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/c08bd61a779c4eb4b70e7d593d9ce242, entries=7, sequenceid=214, filesize=12.2 K 2024-12-09T14:29:55,091 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for bb7a8687dd4764718ce346f3cc62241b in 33ms, sequenceid=214, compaction requested=true 2024-12-09T14:29:55,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:55,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb7a8687dd4764718ce346f3cc62241b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:29:55,092 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:55,092 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:29:55,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:55,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-09T14:29:55,093 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128748 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:29:55,093 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): bb7a8687dd4764718ce346f3cc62241b/info is initiating minor compaction (all files) 2024-12-09T14:29:55,093 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb7a8687dd4764718ce346f3cc62241b/info in TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:55,093 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/531b4afd3d2a436da7f31ec8c7083352, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e2f9e35bee914621aad13e7cbaf0b43f, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/c08bd61a779c4eb4b70e7d593d9ce242] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp, totalSize=125.7 K 2024-12-09T14:29:55,093 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 531b4afd3d2a436da7f31ec8c7083352, keycount=83, bloomtype=ROW, size=92.8 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1733754556674 2024-12-09T14:29:55,094 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2f9e35bee914621aad13e7cbaf0b43f, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733754593021 2024-12-09T14:29:55,094 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting c08bd61a779c4eb4b70e7d593d9ce242, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733754593044 2024-12-09T14:29:55,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/ce9d07d8e85f42429d5abb7be740d146 is 1080, key is row0167/info:/1733754595059/Put/seqid=0 2024-12-09T14:29:55,107 INFO 
[RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb7a8687dd4764718ce346f3cc62241b#info#compaction#78 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:55,109 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/9bcfafd125884e119891417bf6b679d7 is 1080, key is row0062/info:/1733754556674/Put/seqid=0 2024-12-09T14:29:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741863_1039 (size=25472) 2024-12-09T14:29:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741863_1039 (size=25472) 2024-12-09T14:29:55,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/ce9d07d8e85f42429d5abb7be740d146 2024-12-09T14:29:55,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/ce9d07d8e85f42429d5abb7be740d146 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/ce9d07d8e85f42429d5abb7be740d146 2024-12-09T14:29:55,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741864_1040 (size=118898) 2024-12-09T14:29:55,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741864_1040 (size=118898) 2024-12-09T14:29:55,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/ce9d07d8e85f42429d5abb7be740d146, entries=19, sequenceid=236, filesize=24.9 K 2024-12-09T14:29:55,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=7.36 KB/7532 for bb7a8687dd4764718ce346f3cc62241b in 30ms, sequenceid=236, compaction requested=false 2024-12-09T14:29:55,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:55,125 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/9bcfafd125884e119891417bf6b679d7 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9bcfafd125884e119891417bf6b679d7 
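The flush/compaction cycle in this stretch repeats a pattern: each memstore flush adds a new HFile to bb7a8687dd4764718ce346f3cc62241b/info, and once three files are eligible the ExploringCompactionPolicy lines report a selection such as "selected 3 files of size 76659 ... with 1 in ratio". The sketch below is a minimal, self-contained illustration of a ratio-style check (every candidate file no larger than the ratio times the combined size of the other candidates), using the 40830 + 12513 + 23316 = 76659 bytes from the 14:29:50,995 selection; the class name and the 1.2 ratio are assumptions for illustration only, and the real policy additionally applies min/max file counts, a minimum compaction size, and a search over permutations.

// Hypothetical illustration of a ratio-based selection check; not the HBase implementation.
import java.util.Arrays;
import java.util.List;

public class RatioSelectionSketch {
  // Returns true when every candidate file is at most `ratio` times the combined
  // size of the other candidates (the spirit of the "in ratio" log lines above).
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true; // a single file has nothing to be compared against
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Store file sizes (bytes) from the 14:29:50,995 selection in the log:
    // 8bc57452... (39.9 K), 50de1dfd... (12513) and a411ca58... (23316), total 76659.
    List<Long> candidate = Arrays.asList(40830L, 12513L, 23316L);
    double assumedRatio = 1.2; // illustrative ratio, not read from the cluster configuration
    System.out.println("candidate of " + candidate.size() + " files, total "
        + candidate.stream().mapToLong(Long::longValue).sum() + " bytes, in ratio: "
        + filesInRatio(candidate, assumedRatio));
  }
}

With those numbers the check passes, matching the "1 in ratio" report. The later 95076 + 21156 + 12516 = 128748 selection would fail this strict check (95076 exceeds 1.2 times the other two combined), yet is still reported as in ratio, presumably because the real policy exempts selections below a minimum compaction size from the ratio test.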
2024-12-09T14:29:55,131 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb7a8687dd4764718ce346f3cc62241b/info of bb7a8687dd4764718ce346f3cc62241b into 9bcfafd125884e119891417bf6b679d7(size=116.1 K), total size for store is 141.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:29:55,131 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:55,131 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., storeName=bb7a8687dd4764718ce346f3cc62241b/info, priority=13, startTime=1733754595091; duration=0sec 2024-12-09T14:29:55,131 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:55,131 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb7a8687dd4764718ce346f3cc62241b:info 2024-12-09T14:29:55,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T14:29:55,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null [stack trace identical to the 14:29:50,702 occurrence above; root cause: java.io.IOException: Filesystem closed ... 11 more]
2024-12-09T14:29:56,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null [stack trace identical to the 14:29:50,702 occurrence above; root cause: java.io.IOException: Filesystem closed ... 11 more]
2024-12-09T14:29:56,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null [main frames identical to the 14:29:50,702 occurrence above] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:57,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:57,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-09T14:29:57,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/e449716fc4d24326b5e92d4cd0cf99cf is 1080, key is row0186/info:/1733754595093/Put/seqid=0 2024-12-09T14:29:57,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741865_1041 (size=13594) 2024-12-09T14:29:57,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741865_1041 (size=13594) 2024-12-09T14:29:57,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/e449716fc4d24326b5e92d4cd0cf99cf 2024-12-09T14:29:57,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/e449716fc4d24326b5e92d4cd0cf99cf as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e449716fc4d24326b5e92d4cd0cf99cf 2024-12-09T14:29:57,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e449716fc4d24326b5e92d4cd0cf99cf, entries=8, sequenceid=248, filesize=13.3 K 2024-12-09T14:29:57,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=16.81 KB/17216 for bb7a8687dd4764718ce346f3cc62241b in 26ms, sequenceid=248, compaction requested=true 2024-12-09T14:29:57,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:57,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb7a8687dd4764718ce346f3cc62241b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:29:57,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:57,135 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:29:57,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:29:57,137 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157964 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:29:57,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T14:29:57,137 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): bb7a8687dd4764718ce346f3cc62241b/info is initiating minor compaction (all files) 2024-12-09T14:29:57,137 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb7a8687dd4764718ce346f3cc62241b/info in TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:29:57,137 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9bcfafd125884e119891417bf6b679d7, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/ce9d07d8e85f42429d5abb7be740d146, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e449716fc4d24326b5e92d4cd0cf99cf] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp, totalSize=154.3 K 2024-12-09T14:29:57,137 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9bcfafd125884e119891417bf6b679d7, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1733754556674 2024-12-09T14:29:57,138 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting ce9d07d8e85f42429d5abb7be740d146, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733754595059 2024-12-09T14:29:57,138 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting e449716fc4d24326b5e92d4cd0cf99cf, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733754595093 2024-12-09T14:29:57,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/f1729f0dd1d44026b4b22aa2951d4474 is 1080, key is row0194/info:/1733754597110/Put/seqid=0 2024-12-09T14:29:57,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to 
blk_1073741866_1042 (size=24406) 2024-12-09T14:29:57,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741866_1042 (size=24406) 2024-12-09T14:29:57,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/f1729f0dd1d44026b4b22aa2951d4474 2024-12-09T14:29:57,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb7a8687dd4764718ce346f3cc62241b, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T14:29:57,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41394 deadline: 1733754607154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb7a8687dd4764718ce346f3cc62241b, server=f4e784dc7cb5,34999,1733754533657 2024-12-09T14:29:57,155 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=122 , the old value is region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=122, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb7a8687dd4764718ce346f3cc62241b, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:57,156 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=122 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=bb7a8687dd4764718ce346f3cc62241b, server=f4e784dc7cb5,34999,1733754533657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T14:29:57,156 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., hostname=f4e784dc7cb5,34999,1733754533657, seqNum=122 because the exception is null or not the one we care about 2024-12-09T14:29:57,157 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb7a8687dd4764718ce346f3cc62241b#info#compaction#81 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:29:57,158 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/650568b5ec8e44d49c4a0cacca3bdeb2 is 1080, key is row0062/info:/1733754556674/Put/seqid=0 2024-12-09T14:29:57,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/f1729f0dd1d44026b4b22aa2951d4474 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/f1729f0dd1d44026b4b22aa2951d4474 2024-12-09T14:29:57,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/f1729f0dd1d44026b4b22aa2951d4474, entries=18, sequenceid=269, filesize=23.8 K 2024-12-09T14:29:57,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741867_1043 (size=148311) 2024-12-09T14:29:57,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741867_1043 (size=148311) 2024-12-09T14:29:57,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for bb7a8687dd4764718ce346f3cc62241b in 28ms, sequenceid=269, compaction requested=false 2024-12-09T14:29:57,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:57,169 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/650568b5ec8e44d49c4a0cacca3bdeb2 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/650568b5ec8e44d49c4a0cacca3bdeb2 2024-12-09T14:29:57,174 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb7a8687dd4764718ce346f3cc62241b/info of bb7a8687dd4764718ce346f3cc62241b into 650568b5ec8e44d49c4a0cacca3bdeb2(size=144.8 K), total size for store is 168.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T14:29:57,174 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:29:57,174 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., storeName=bb7a8687dd4764718ce346f3cc62241b/info, priority=13, startTime=1733754597135; duration=0sec 2024-12-09T14:29:57,174 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:29:57,174 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb7a8687dd4764718ce346f3cc62241b:info 2024-12-09T14:29:57,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:29:57,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:58,343 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=67, reuseRatio=88.16% 2024-12-09T14:29:58,343 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T14:29:58,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:58,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:59,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:29:59,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:00,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:00,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:01,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:01,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:02,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:02,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:03,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:03,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:30:04,669 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bb7a8687dd4764718ce346f3cc62241b, had cached 0 bytes from a total of 172717 2024-12-09T14:30:04,705 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 19fa93135a4999e0f57da51576741184, had cached 0 bytes from a total of 70862 2024-12-09T14:30:04,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:04,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:04,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,973 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,973 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,973 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,976 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:04,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,485 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T14:30:05,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,487 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,512 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T14:30:05,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:05,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:30:06,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:06,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:07,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:30:07,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T14:30:07,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/a7d15b857a3847a082af5bb56a5d8920 is 1080, key is row0212/info:/1733754597137/Put/seqid=0 2024-12-09T14:30:07,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741868_1044 (size=17918) 2024-12-09T14:30:07,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741868_1044 (size=17918) 2024-12-09T14:30:07,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/a7d15b857a3847a082af5bb56a5d8920 2024-12-09T14:30:07,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/a7d15b857a3847a082af5bb56a5d8920 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a7d15b857a3847a082af5bb56a5d8920 2024-12-09T14:30:07,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a7d15b857a3847a082af5bb56a5d8920, entries=12, sequenceid=285, filesize=17.5 K 2024-12-09T14:30:07,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, 
currentSize=1.05 KB/1076 for bb7a8687dd4764718ce346f3cc62241b in 99ms, sequenceid=285, compaction requested=true 2024-12-09T14:30:07,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:30:07,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb7a8687dd4764718ce346f3cc62241b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:30:07,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:30:07,299 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:30:07,300 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190635 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:30:07,300 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): bb7a8687dd4764718ce346f3cc62241b/info is initiating minor compaction (all files) 2024-12-09T14:30:07,301 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb7a8687dd4764718ce346f3cc62241b/info in TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:30:07,301 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/650568b5ec8e44d49c4a0cacca3bdeb2, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/f1729f0dd1d44026b4b22aa2951d4474, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a7d15b857a3847a082af5bb56a5d8920] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp, totalSize=186.2 K 2024-12-09T14:30:07,301 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 650568b5ec8e44d49c4a0cacca3bdeb2, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733754556674 2024-12-09T14:30:07,302 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting f1729f0dd1d44026b4b22aa2951d4474, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1733754597110 2024-12-09T14:30:07,302 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7d15b857a3847a082af5bb56a5d8920, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733754597137 2024-12-09T14:30:07,331 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb7a8687dd4764718ce346f3cc62241b#info#compaction#83 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time 
is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:30:07,331 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/9402483df44b44b89e66c8784e98e354 is 1080, key is row0062/info:/1733754556674/Put/seqid=0 2024-12-09T14:30:07,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741869_1045 (size=180785) 2024-12-09T14:30:07,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741869_1045 (size=180785) 2024-12-09T14:30:07,383 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/9402483df44b44b89e66c8784e98e354 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9402483df44b44b89e66c8784e98e354 2024-12-09T14:30:07,391 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb7a8687dd4764718ce346f3cc62241b/info of bb7a8687dd4764718ce346f3cc62241b into 9402483df44b44b89e66c8784e98e354(size=176.5 K), total size for store is 176.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:30:07,391 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:30:07,391 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., storeName=bb7a8687dd4764718ce346f3cc62241b/info, priority=13, startTime=1733754607299; duration=0sec 2024-12-09T14:30:07,391 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:30:07,391 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb7a8687dd4764718ce346f3cc62241b:info 2024-12-09T14:30:07,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:07,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:08,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:08,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:09,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:30:09,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T14:30:09,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/2f7f2d8838c047e68278e84f66572f79 is 1080, key is row0224/info:/1733754607201/Put/seqid=0 2024-12-09T14:30:09,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741870_1046 (size=12523) 2024-12-09T14:30:09,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741870_1046 (size=12523) 2024-12-09T14:30:09,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/2f7f2d8838c047e68278e84f66572f79 2024-12-09T14:30:09,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/2f7f2d8838c047e68278e84f66572f79 as 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/2f7f2d8838c047e68278e84f66572f79 2024-12-09T14:30:09,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/2f7f2d8838c047e68278e84f66572f79, entries=7, sequenceid=296, filesize=12.2 K 2024-12-09T14:30:09,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for bb7a8687dd4764718ce346f3cc62241b in 29ms, sequenceid=296, compaction requested=false 2024-12-09T14:30:09,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:30:09,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34999 {}] regionserver.HRegion(8855): Flush requested on bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:30:09,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-09T14:30:09,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/3812646c5b50458e982f7a88ac66ef1c is 1080, key is row0231/info:/1733754609213/Put/seqid=0 2024-12-09T14:30:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741871_1047 (size=22254) 2024-12-09T14:30:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741871_1047 (size=22254) 2024-12-09T14:30:09,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/3812646c5b50458e982f7a88ac66ef1c 2024-12-09T14:30:09,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/3812646c5b50458e982f7a88ac66ef1c as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/3812646c5b50458e982f7a88ac66ef1c 2024-12-09T14:30:09,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/3812646c5b50458e982f7a88ac66ef1c, entries=16, sequenceid=315, filesize=21.7 K 2024-12-09T14:30:09,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=10.51 KB/10760 for bb7a8687dd4764718ce346f3cc62241b in 20ms, sequenceid=315, compaction requested=true 2024-12-09T14:30:09,263 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:30:09,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb7a8687dd4764718ce346f3cc62241b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T14:30:09,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:30:09,263 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T14:30:09,264 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 215562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T14:30:09,264 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1541): bb7a8687dd4764718ce346f3cc62241b/info is initiating minor compaction (all files) 2024-12-09T14:30:09,264 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bb7a8687dd4764718ce346f3cc62241b/info in TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:30:09,264 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9402483df44b44b89e66c8784e98e354, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/2f7f2d8838c047e68278e84f66572f79, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/3812646c5b50458e982f7a88ac66ef1c] into tmpdir=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp, totalSize=210.5 K 2024-12-09T14:30:09,265 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9402483df44b44b89e66c8784e98e354, keycount=162, bloomtype=ROW, size=176.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733754556674 2024-12-09T14:30:09,265 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f7f2d8838c047e68278e84f66572f79, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733754607201 2024-12-09T14:30:09,265 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3812646c5b50458e982f7a88ac66ef1c, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733754609213 2024-12-09T14:30:09,280 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb7a8687dd4764718ce346f3cc62241b#info#compaction#86 average throughput is 63.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T14:30:09,280 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/41c3d0a7a5824b7487be61db01df29f4 is 1080, key is row0062/info:/1733754556674/Put/seqid=0 2024-12-09T14:30:09,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741872_1048 (size=205781) 2024-12-09T14:30:09,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741872_1048 (size=205781) 2024-12-09T14:30:09,290 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/41c3d0a7a5824b7487be61db01df29f4 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/41c3d0a7a5824b7487be61db01df29f4 2024-12-09T14:30:09,296 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bb7a8687dd4764718ce346f3cc62241b/info of bb7a8687dd4764718ce346f3cc62241b into 41c3d0a7a5824b7487be61db01df29f4(size=201.0 K), total size for store is 201.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T14:30:09,296 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:30:09,296 INFO [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., storeName=bb7a8687dd4764718ce346f3cc62241b/info, priority=13, startTime=1733754609263; duration=0sec 2024-12-09T14:30:09,296 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T14:30:09,296 DEBUG [RS:0;f4e784dc7cb5:34999-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb7a8687dd4764718ce346f3cc62241b:info 2024-12-09T14:30:09,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:09,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:10,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:10,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:11,257 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-09T14:30:11,258 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C34999%2C1733754533657.1733754611258 2024-12-09T14:30:11,279 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,279 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,279 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,279 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,279 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,279 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754534089 with entries=308, filesize=307.10 KB; new WAL /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754611258 2024-12-09T14:30:11,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741833_1009 (size=314476) 2024-12-09T14:30:11,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741833_1009 (size=314476) 2024-12-09T14:30:11,282 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:40043:40043)] 2024-12-09T14:30:11,282 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754534089 is not closed yet, will try archiving it next time 2024-12-09T14:30:11,285 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 19fa93135a4999e0f57da51576741184: 2024-12-09T14:30:11,285 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing bb7a8687dd4764718ce346f3cc62241b 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-09T14:30:11,289 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/6ebdd29b6d1244358848d48497c85957 is 1080, key is row0247/info:/1733754609243/Put/seqid=0 2024-12-09T14:30:11,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741874_1050 (size=15760) 2024-12-09T14:30:11,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741874_1050 (size=15760) 2024-12-09T14:30:11,294 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/6ebdd29b6d1244358848d48497c85957 2024-12-09T14:30:11,300 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/.tmp/info/6ebdd29b6d1244358848d48497c85957 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/6ebdd29b6d1244358848d48497c85957 2024-12-09T14:30:11,305 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/6ebdd29b6d1244358848d48497c85957, entries=10, sequenceid=329, filesize=15.4 K 2024-12-09T14:30:11,307 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for bb7a8687dd4764718ce346f3cc62241b in 22ms, sequenceid=329, compaction requested=false 2024-12-09T14:30:11,307 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for bb7a8687dd4764718ce346f3cc62241b: 2024-12-09T14:30:11,307 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-09T14:30:11,312 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/info/32f78d3cfd1944c5a4f6e766856cab44 is 186, key is TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184./info:regioninfo/1733754559725/Put/seqid=0 2024-12-09T14:30:11,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35043 is added to blk_1073741875_1051 (size=6153) 2024-12-09T14:30:11,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741875_1051 (size=6153) 2024-12-09T14:30:11,318 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/info/32f78d3cfd1944c5a4f6e766856cab44 2024-12-09T14:30:11,324 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/.tmp/info/32f78d3cfd1944c5a4f6e766856cab44 as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/info/32f78d3cfd1944c5a4f6e766856cab44 2024-12-09T14:30:11,328 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/info/32f78d3cfd1944c5a4f6e766856cab44, entries=5, sequenceid=21, filesize=6.0 K 2024-12-09T14:30:11,330 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=21, compaction requested=false 2024-12-09T14:30:11,330 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T14:30:11,330 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C34999%2C1733754533657.1733754611330 2024-12-09T14:30:11,335 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,335 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,335 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,335 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,336 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,336 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754611258 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754611330 2024-12-09T14:30:11,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41803:41803),(127.0.0.1/127.0.0.1:40043:40043)] 2024-12-09T14:30:11,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754611258 is not closed yet, will try archiving it next time 2024-12-09T14:30:11,337 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754534089 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/oldWALs/f4e784dc7cb5%2C34999%2C1733754533657.1733754534089 2024-12-09T14:30:11,337 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741873_1049 (size=731) 2024-12-09T14:30:11,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741873_1049 (size=731) 2024-12-09T14:30:11,338 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T14:30:11,338 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/WALs/f4e784dc7cb5,34999,1733754533657/f4e784dc7cb5%2C34999%2C1733754533657.1733754611258 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/oldWALs/f4e784dc7cb5%2C34999%2C1733754533657.1733754611258 2024-12-09T14:30:11,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T14:30:11,438 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T14:30:11,438 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:30:11,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:11,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:11,438 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T14:30:11,439 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T14:30:11,439 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1075957419, stopped=false 2024-12-09T14:30:11,439 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f4e784dc7cb5,46445,1733754533603 2024-12-09T14:30:11,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:30:11,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:30:11,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:11,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:11,441 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:30:11,441 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T14:30:11,441 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:30:11,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:11,441 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,34999,1733754533657' ***** 2024-12-09T14:30:11,441 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:30:11,442 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:30:11,442 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:30:11,442 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:30:11,442 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:30:11,442 INFO [RS:0;f4e784dc7cb5:34999 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:30:11,442 INFO [RS:0;f4e784dc7cb5:34999 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:30:11,442 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(3091): Received CLOSE for 19fa93135a4999e0f57da51576741184 2024-12-09T14:30:11,442 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(3091): Received CLOSE for bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,34999,1733754533657 2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 19fa93135a4999e0f57da51576741184, disabling compactions & flushes 2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f4e784dc7cb5:34999. 2024-12-09T14:30:11,443 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 
2024-12-09T14:30:11,443 DEBUG [RS:0;f4e784dc7cb5:34999 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. after waiting 0 ms 2024-12-09T14:30:11,443 DEBUG [RS:0;f4e784dc7cb5:34999 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T14:30:11,443 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-09T14:30:11,443 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1325): Online Regions={19fa93135a4999e0f57da51576741184=TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184., bb7a8687dd4764718ce346f3cc62241b=TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T14:30:11,443 DEBUG [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 19fa93135a4999e0f57da51576741184, bb7a8687dd4764718ce346f3cc62241b 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:30:11,443 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:30:11,443 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e->hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53-bottom] to archive 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:30:11,443 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:30:11,444 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T14:30:11,446 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:30:11,447 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=f4e784dc7cb5:46445 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-09T14:30:11,447 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-09T14:30:11,448 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-09T14:30:11,449 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:30:11,449 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:30:11,449 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754611443Running coprocessor pre-close hooks at 1733754611443Disabling compacts and flushes for region at 1733754611443Disabling writes for close at 1733754611443Writing region close event to WAL at 1733754611445 (+2 ms)Running coprocessor post-close hooks at 1733754611449 (+4 ms)Closed at 1733754611449 2024-12-09T14:30:11,449 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T14:30:11,451 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/19fa93135a4999e0f57da51576741184/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121 2024-12-09T14:30:11,451 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 2024-12-09T14:30:11,451 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 19fa93135a4999e0f57da51576741184: Waiting for close lock at 1733754611443Running coprocessor pre-close hooks at 1733754611443Disabling compacts and flushes for region at 1733754611443Disabling writes for close at 1733754611443Writing region close event to WAL at 1733754611447 (+4 ms)Running coprocessor post-close hooks at 1733754611451 (+4 ms)Closed at 1733754611451 2024-12-09T14:30:11,451 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733754558789.19fa93135a4999e0f57da51576741184. 
2024-12-09T14:30:11,452 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bb7a8687dd4764718ce346f3cc62241b, disabling compactions & flushes 2024-12-09T14:30:11,452 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:30:11,452 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:30:11,452 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. after waiting 0 ms 2024-12-09T14:30:11,452 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:30:11,452 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e->hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/0862a744e65ec9e18a19a29534f5321e/info/1ccd25963e1043d0a67318bd9df69a53-top, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8bc5745262464e688ca2f98a5f87ace8, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/50de1dfdeca44584bb06ab7e58b5673a, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/7d0e146bb4594902bea156908eeadf9c, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a411ca588fad4fb8ab1e274e658b6136, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/48ea9e5fff6d40ffbd055b54cc7ae9ac, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/531b4afd3d2a436da7f31ec8c7083352, 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8e3ad82119d740139e89411095223797, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e2f9e35bee914621aad13e7cbaf0b43f, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9bcfafd125884e119891417bf6b679d7, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/c08bd61a779c4eb4b70e7d593d9ce242, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/ce9d07d8e85f42429d5abb7be740d146, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/650568b5ec8e44d49c4a0cacca3bdeb2, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e449716fc4d24326b5e92d4cd0cf99cf, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/f1729f0dd1d44026b4b22aa2951d4474, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9402483df44b44b89e66c8784e98e354, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a7d15b857a3847a082af5bb56a5d8920, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/2f7f2d8838c047e68278e84f66572f79, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/3812646c5b50458e982f7a88ac66ef1c] to archive 2024-12-09T14:30:11,453 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T14:30:11,455 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/1ccd25963e1043d0a67318bd9df69a53.0862a744e65ec9e18a19a29534f5321e 2024-12-09T14:30:11,456 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-54ab0e21f41e4aaaa8ec4960130061ba 2024-12-09T14:30:11,457 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8bc5745262464e688ca2f98a5f87ace8 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8bc5745262464e688ca2f98a5f87ace8 2024-12-09T14:30:11,458 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/TestLogRolling-testLogRolling=0862a744e65ec9e18a19a29534f5321e-6978a453017d4e588b8894f35f9632fa 2024-12-09T14:30:11,459 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/50de1dfdeca44584bb06ab7e58b5673a to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/50de1dfdeca44584bb06ab7e58b5673a 2024-12-09T14:30:11,460 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/7d0e146bb4594902bea156908eeadf9c to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/7d0e146bb4594902bea156908eeadf9c 2024-12-09T14:30:11,462 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a411ca588fad4fb8ab1e274e658b6136 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a411ca588fad4fb8ab1e274e658b6136 2024-12-09T14:30:11,463 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/48ea9e5fff6d40ffbd055b54cc7ae9ac to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/48ea9e5fff6d40ffbd055b54cc7ae9ac 2024-12-09T14:30:11,465 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/531b4afd3d2a436da7f31ec8c7083352 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/531b4afd3d2a436da7f31ec8c7083352 2024-12-09T14:30:11,467 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8e3ad82119d740139e89411095223797 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/8e3ad82119d740139e89411095223797 2024-12-09T14:30:11,468 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e2f9e35bee914621aad13e7cbaf0b43f to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e2f9e35bee914621aad13e7cbaf0b43f 2024-12-09T14:30:11,470 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9bcfafd125884e119891417bf6b679d7 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9bcfafd125884e119891417bf6b679d7 2024-12-09T14:30:11,472 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/c08bd61a779c4eb4b70e7d593d9ce242 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/c08bd61a779c4eb4b70e7d593d9ce242 2024-12-09T14:30:11,473 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/ce9d07d8e85f42429d5abb7be740d146 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/ce9d07d8e85f42429d5abb7be740d146 2024-12-09T14:30:11,475 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/650568b5ec8e44d49c4a0cacca3bdeb2 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/650568b5ec8e44d49c4a0cacca3bdeb2 2024-12-09T14:30:11,476 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e449716fc4d24326b5e92d4cd0cf99cf to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/e449716fc4d24326b5e92d4cd0cf99cf 2024-12-09T14:30:11,478 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/f1729f0dd1d44026b4b22aa2951d4474 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/f1729f0dd1d44026b4b22aa2951d4474 2024-12-09T14:30:11,479 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9402483df44b44b89e66c8784e98e354 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/9402483df44b44b89e66c8784e98e354 2024-12-09T14:30:11,481 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a7d15b857a3847a082af5bb56a5d8920 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/a7d15b857a3847a082af5bb56a5d8920 2024-12-09T14:30:11,482 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/2f7f2d8838c047e68278e84f66572f79 to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/2f7f2d8838c047e68278e84f66572f79 2024-12-09T14:30:11,484 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/3812646c5b50458e982f7a88ac66ef1c to hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/archive/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/info/3812646c5b50458e982f7a88ac66ef1c 2024-12-09T14:30:11,484 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [8bc5745262464e688ca2f98a5f87ace8=40830, 50de1dfdeca44584bb06ab7e58b5673a=12513, 7d0e146bb4594902bea156908eeadf9c=66869, a411ca588fad4fb8ab1e274e658b6136=23316, 48ea9e5fff6d40ffbd055b54cc7ae9ac=16828, 531b4afd3d2a436da7f31ec8c7083352=95076, 8e3ad82119d740139e89411095223797=21156, e2f9e35bee914621aad13e7cbaf0b43f=21156, 9bcfafd125884e119891417bf6b679d7=118898, c08bd61a779c4eb4b70e7d593d9ce242=12516, ce9d07d8e85f42429d5abb7be740d146=25472, 650568b5ec8e44d49c4a0cacca3bdeb2=148311, e449716fc4d24326b5e92d4cd0cf99cf=13594, f1729f0dd1d44026b4b22aa2951d4474=24406, 9402483df44b44b89e66c8784e98e354=180785, a7d15b857a3847a082af5bb56a5d8920=17918, 2f7f2d8838c047e68278e84f66572f79=12523, 3812646c5b50458e982f7a88ac66ef1c=22254] 2024-12-09T14:30:11,488 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/data/default/TestLogRolling-testLogRolling/bb7a8687dd4764718ce346f3cc62241b/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=121 2024-12-09T14:30:11,489 INFO [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:30:11,489 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bb7a8687dd4764718ce346f3cc62241b: Waiting for close lock at 1733754611452Running coprocessor pre-close hooks at 1733754611452Disabling compacts and flushes for region at 1733754611452Disabling writes for close at 1733754611452Writing region close event to WAL at 1733754611484 (+32 ms)Running coprocessor post-close hooks at 1733754611489 (+5 ms)Closed at 1733754611489 2024-12-09T14:30:11,490 DEBUG [RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733754558789.bb7a8687dd4764718ce346f3cc62241b. 2024-12-09T14:30:11,643 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,34999,1733754533657; all regions closed. 2024-12-09T14:30:11,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,644 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,644 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,644 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,644 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741834_1010 (size=8107) 2024-12-09T14:30:11,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741834_1010 (size=8107) 2024-12-09T14:30:11,649 DEBUG [RS:0;f4e784dc7cb5:34999 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/oldWALs 2024-12-09T14:30:11,649 INFO [RS:0;f4e784dc7cb5:34999 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C34999%2C1733754533657.meta:.meta(num 1733754534277) 2024-12-09T14:30:11,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741876_1052 (size=778) 2024-12-09T14:30:11,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741876_1052 (size=778) 2024-12-09T14:30:11,655 DEBUG [RS:0;f4e784dc7cb5:34999 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/oldWALs 2024-12-09T14:30:11,655 INFO [RS:0;f4e784dc7cb5:34999 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C34999%2C1733754533657:(num 
1733754611330) 2024-12-09T14:30:11,655 DEBUG [RS:0;f4e784dc7cb5:34999 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:11,655 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:30:11,655 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:30:11,655 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T14:30:11,655 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:30:11,655 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T14:30:11,656 INFO [RS:0;f4e784dc7cb5:34999 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34999 2024-12-09T14:30:11,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:30:11,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,34999,1733754533657 2024-12-09T14:30:11,658 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:30:11,659 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,34999,1733754533657] 2024-12-09T14:30:11,662 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,34999,1733754533657 already deleted, retry=false 2024-12-09T14:30:11,662 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,34999,1733754533657 expired; onlineServers=0 2024-12-09T14:30:11,662 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f4e784dc7cb5,46445,1733754533603' ***** 2024-12-09T14:30:11,662 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T14:30:11,662 INFO [M:0;f4e784dc7cb5:46445 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:30:11,662 INFO [M:0;f4e784dc7cb5:46445 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:30:11,662 DEBUG [M:0;f4e784dc7cb5:46445 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T14:30:11,662 DEBUG [M:0;f4e784dc7cb5:46445 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T14:30:11,662 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T14:30:11,662 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754533886 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754533886,5,FailOnTimeoutGroup] 2024-12-09T14:30:11,662 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754533887 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754533887,5,FailOnTimeoutGroup] 2024-12-09T14:30:11,662 INFO [M:0;f4e784dc7cb5:46445 {}] hbase.ChoreService(370): Chore service for: master/f4e784dc7cb5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T14:30:11,662 INFO [M:0;f4e784dc7cb5:46445 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:30:11,662 DEBUG [M:0;f4e784dc7cb5:46445 {}] master.HMaster(1795): Stopping service threads 2024-12-09T14:30:11,662 INFO [M:0;f4e784dc7cb5:46445 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T14:30:11,662 INFO [M:0;f4e784dc7cb5:46445 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:30:11,663 INFO [M:0;f4e784dc7cb5:46445 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T14:30:11,663 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T14:30:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T14:30:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:11,664 DEBUG [M:0;f4e784dc7cb5:46445 {}] zookeeper.ZKUtil(347): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T14:30:11,664 WARN [M:0;f4e784dc7cb5:46445 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T14:30:11,664 INFO [M:0;f4e784dc7cb5:46445 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/.lastflushedseqids 2024-12-09T14:30:11,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741877_1053 (size=228) 2024-12-09T14:30:11,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741877_1053 (size=228) 2024-12-09T14:30:11,670 INFO [M:0;f4e784dc7cb5:46445 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T14:30:11,670 INFO [M:0;f4e784dc7cb5:46445 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T14:30:11,670 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:30:11,670 INFO [M:0;f4e784dc7cb5:46445 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:11,670 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:11,670 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:30:11,670 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:11,671 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-12-09T14:30:11,687 DEBUG [M:0;f4e784dc7cb5:46445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0cfa1e2c5f7b4371a3b69b7a4a13169f is 82, key is hbase:meta,,1/info:regioninfo/1733754534298/Put/seqid=0 2024-12-09T14:30:11,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741878_1054 (size=5672) 2024-12-09T14:30:11,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741878_1054 (size=5672) 2024-12-09T14:30:11,693 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0cfa1e2c5f7b4371a3b69b7a4a13169f 2024-12-09T14:30:11,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:11,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:30:11,713 DEBUG [M:0;f4e784dc7cb5:46445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63b316f46c8542c4930f3047bdbcfa9f is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733754534762/Put/seqid=0 2024-12-09T14:30:11,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741879_1055 (size=7091) 2024-12-09T14:30:11,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741879_1055 (size=7091) 2024-12-09T14:30:11,718 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63b316f46c8542c4930f3047bdbcfa9f 2024-12-09T14:30:11,722 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 63b316f46c8542c4930f3047bdbcfa9f 2024-12-09T14:30:11,737 DEBUG [M:0;f4e784dc7cb5:46445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/78cb7d8ff2b64229a37ac5e46248a340 is 69, key is f4e784dc7cb5,34999,1733754533657/rs:state/1733754533916/Put/seqid=0 2024-12-09T14:30:11,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741880_1056 (size=5156) 2024-12-09T14:30:11,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741880_1056 (size=5156) 2024-12-09T14:30:11,742 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/78cb7d8ff2b64229a37ac5e46248a340 2024-12-09T14:30:11,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:30:11,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34999-0x1012b9646f20001, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:30:11,760 INFO [RS:0;f4e784dc7cb5:34999 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:30:11,760 INFO [RS:0;f4e784dc7cb5:34999 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,34999,1733754533657; zookeeper connection closed. 
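Note on the two Close-WAL-Writer-0 warnings above: RecoverLeaseFSUtils probes whether an old WAL file is already closed by invoking isFileClosed reflectively, and because the DFSClient behind that earlier minicluster (hdfs://localhost:43639) has already been shut down, the probe surfaces as an InvocationTargetException wrapping "java.io.IOException: Filesystem closed", which the utility logs at WARN ("Failed invocation for ..."). A rough sketch of that reflective probe pattern, offered as an illustration rather than the actual RecoverLeaseFSUtils code (the FileSystem handle and Path are whatever the caller supplies):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {

        // Returns true only if the file system exposes isFileClosed(Path) and it reports
        // the file as closed; any reflective failure (no such method, a closed client, ...)
        // is treated as "not known to be closed yet".
        static boolean isFileClosed(FileSystem fs, Path path) {
            try {
                Method m = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) m.invoke(fs, path);
            } catch (NoSuchMethodException e) {
                return false;   // plain FileSystem implementations have no such probe
            } catch (IllegalAccessException | InvocationTargetException e) {
                // e.g. java.io.IOException: Filesystem closed, as in the traces above
                return false;
            }
        }

        private IsFileClosedProbe() {}
    }

The wrapped cause in the log is exactly the IOException thrown by DFSClient.checkOpen, so the warning here appears to be leftover cleanup from the previous test's cluster rather than a failure of the test in progress.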
2024-12-09T14:30:11,760 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@74111e7f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@74111e7f 2024-12-09T14:30:11,760 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T14:30:11,761 DEBUG [M:0;f4e784dc7cb5:46445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/86334a1c977441009d3f4722b11a3feb is 52, key is load_balancer_on/state:d/1733754534389/Put/seqid=0 2024-12-09T14:30:11,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741881_1057 (size=5056) 2024-12-09T14:30:11,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741881_1057 (size=5056) 2024-12-09T14:30:11,766 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/86334a1c977441009d3f4722b11a3feb 2024-12-09T14:30:11,771 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0cfa1e2c5f7b4371a3b69b7a4a13169f as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0cfa1e2c5f7b4371a3b69b7a4a13169f 2024-12-09T14:30:11,776 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0cfa1e2c5f7b4371a3b69b7a4a13169f, entries=8, sequenceid=125, filesize=5.5 K 2024-12-09T14:30:11,777 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63b316f46c8542c4930f3047bdbcfa9f as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/63b316f46c8542c4930f3047bdbcfa9f 2024-12-09T14:30:11,782 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 63b316f46c8542c4930f3047bdbcfa9f 2024-12-09T14:30:11,782 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/63b316f46c8542c4930f3047bdbcfa9f, entries=13, sequenceid=125, filesize=6.9 K 2024-12-09T14:30:11,783 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/78cb7d8ff2b64229a37ac5e46248a340 as 
hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/78cb7d8ff2b64229a37ac5e46248a340 2024-12-09T14:30:11,788 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/78cb7d8ff2b64229a37ac5e46248a340, entries=1, sequenceid=125, filesize=5.0 K 2024-12-09T14:30:11,789 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/86334a1c977441009d3f4722b11a3feb as hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/86334a1c977441009d3f4722b11a3feb 2024-12-09T14:30:11,794 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46419/user/jenkins/test-data/8838ef82-d608-47c4-8122-7391921ba41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/86334a1c977441009d3f4722b11a3feb, entries=1, sequenceid=125, filesize=4.9 K 2024-12-09T14:30:11,795 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=125, compaction requested=false 2024-12-09T14:30:11,796 INFO [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:11,796 DEBUG [M:0;f4e784dc7cb5:46445 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754611670Disabling compacts and flushes for region at 1733754611670Disabling writes for close at 1733754611670Obtaining lock to block concurrent updates at 1733754611671 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733754611671Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1733754611671Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733754611672 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733754611672Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733754611687 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733754611687Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733754611697 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733754611712 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733754611713 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733754611722 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733754611736 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733754611736Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733754611746 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733754611760 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733754611760Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3502732e: reopening flushed file at 1733754611771 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77d78861: reopening flushed file at 1733754611776 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a8a2325: reopening flushed file at 1733754611782 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50218447: reopening flushed file at 1733754611788 (+6 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=125, compaction requested=false at 1733754611795 (+7 ms)Writing region close event to WAL at 1733754611796 (+1 ms)Closed at 1733754611796 2024-12-09T14:30:11,797 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,797 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,797 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,797 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,797 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:11,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741830_1006 (size=61332) 2024-12-09T14:30:11,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35043 is added to blk_1073741830_1006 (size=61332) 2024-12-09T14:30:11,950 INFO [regionserver/f4e784dc7cb5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:30:12,201 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T14:30:12,202 INFO [M:0;f4e784dc7cb5:46445 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
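Note on the flush above: it follows the usual write-to-temp-then-commit pattern; each family's data is first written under .../1595e783b53d99cd5eef43b6debb2682/.tmp/, then HRegionFileSystem ("Committing ... as ...") moves it into the family directory (info, proc, rs, state) and HStore reports "Added ..." with the final size and sequenceid. A minimal illustration of that pattern against the stock Hadoop FileSystem API (the paths, file contents, and class name are invented for the sketch; this is not the HBase commit code):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);   // local FS unless fs.defaultFS points at HDFS

            Path tmp = new Path("/tmp/store/.tmp/part-0");   // staging location
            Path dst = new Path("/tmp/store/info/part-0");   // final location

            // 1. Write the complete file under .tmp so readers never observe a partial file.
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("example cell data".getBytes(StandardCharsets.UTF_8));
            }

            // 2. Commit by renaming into the store directory; on HDFS the rename is atomic.
            fs.mkdirs(dst.getParent());
            if (!fs.rename(tmp, dst)) {
                throw new IOException("commit failed: " + tmp + " -> " + dst);
            }
            fs.close();
        }
    }

Because only the rename publishes the file, a crash mid-flush leaves at most an orphaned .tmp file, which is what the HFileCleaner threads seen earlier in the shutdown exist to remove.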
2024-12-09T14:30:12,202 INFO [M:0;f4e784dc7cb5:46445 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46445 2024-12-09T14:30:12,202 INFO [M:0;f4e784dc7cb5:46445 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:30:12,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:30:12,304 INFO [M:0;f4e784dc7cb5:46445 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:30:12,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46445-0x1012b9646f20000, quorum=127.0.0.1:55084, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:30:12,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f87fe6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:30:12,311 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64d2170c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:30:12,311 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:30:12,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5689196f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:30:12,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2de80e16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir/,STOPPED} 2024-12-09T14:30:12,314 WARN [BP-1146165504-172.17.0.3-1733754532655 heartbeating to localhost/127.0.0.1:46419 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:30:12,314 WARN [BP-1146165504-172.17.0.3-1733754532655 heartbeating to localhost/127.0.0.1:46419 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1146165504-172.17.0.3-1733754532655 (Datanode Uuid 3dd3c2fb-0b42-4fbe-b24e-afd1caa41720) service to localhost/127.0.0.1:46419 2024-12-09T14:30:12,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
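Note on the Jetty lines above: "Stopped ServerConnector@...{HTTP/1.1}{localhost:0}" and "Stopped o.e.j.w.WebAppContext{datanode,...}" are the MiniDFSCluster tearing down the embedded Jetty servers behind the DataNode/NameNode web UIs; "localhost:0" reflects a connector configured to bind an ephemeral port. Purely as an illustration (class name and handler are invented for the sketch), an embedded Jetty 9 server bound and stopped the same way looks roughly like:

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;
    import org.eclipse.jetty.server.handler.DefaultHandler;

    public class EphemeralJettySketch {
        public static void main(String[] args) throws Exception {
            Server server = new Server();
            ServerConnector connector = new ServerConnector(server);
            connector.setHost("localhost");
            connector.setPort(0);             // 0 = let the OS pick an ephemeral port
            server.addConnector(connector);
            server.setHandler(new DefaultHandler());

            server.start();
            System.out.println("listening on localhost:" + connector.getLocalPort());

            server.stop();                    // Jetty logs the "Stopped ServerConnector ..." line seen above
            server.join();
        }
    }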
2024-12-09T14:30:12,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data3/current/BP-1146165504-172.17.0.3-1733754532655 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:30:12,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:30:12,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data4/current/BP-1146165504-172.17.0.3-1733754532655 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:30:12,315 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:30:12,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4881a2ed{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:30:12,320 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@374d3611{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:30:12,320 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:30:12,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d8c7847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:30:12,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a5ea7cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir/,STOPPED} 2024-12-09T14:30:12,323 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
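Note on what follows: just below, hbase.ResourceChecker prints its after-test snapshot (Thread=226 (was 206), OpenFileDescriptor=509 (was 483), ...) and dumps the stacks of threads it flags as potentially hanging, mostly Netty event loops and Hadoop IPC client threads that are still winding down asynchronously. As a rough, JDK-only illustration of that before/after bookkeeping (not the ResourceChecker implementation; the test body is a placeholder):

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadMXBean;

    public class ResourceSnapshotSketch {
        public static void main(String[] args) {
            ThreadMXBean threads = ManagementFactory.getThreadMXBean();

            int before = threads.getThreadCount();
            Runnable test = () -> { /* run the test body here */ };
            test.run();
            int after = threads.getThreadCount();

            System.out.printf("Thread=%d (was %d)%n", after, before);
            if (after > before) {
                // Dump every live stack so any newly created (possibly leaked) threads can be spotted.
                for (var entry : Thread.getAllStackTraces().entrySet()) {
                    System.out.println("live thread: " + entry.getKey().getName());
                    for (StackTraceElement frame : entry.getValue()) {
                        System.out.println("    " + frame);
                    }
                }
            }
        }
    }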
2024-12-09T14:30:12,323 WARN [BP-1146165504-172.17.0.3-1733754532655 heartbeating to localhost/127.0.0.1:46419 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T14:30:12,323 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T14:30:12,323 WARN [BP-1146165504-172.17.0.3-1733754532655 heartbeating to localhost/127.0.0.1:46419 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1146165504-172.17.0.3-1733754532655 (Datanode Uuid 3e99010d-90a7-43df-b927-c1c74f03c18b) service to localhost/127.0.0.1:46419 2024-12-09T14:30:12,324 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data1/current/BP-1146165504-172.17.0.3-1733754532655 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:30:12,324 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/cluster_6d7068ad-3335-bfcd-3736-aea58bf5faaf/data/data2/current/BP-1146165504-172.17.0.3-1733754532655 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T14:30:12,324 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T14:30:12,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bdca924{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T14:30:12,336 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27fb1a0a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T14:30:12,336 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T14:30:12,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b767eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T14:30:12,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fda4535{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir/,STOPPED} 2024-12-09T14:30:12,345 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T14:30:12,390 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T14:30:12,402 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=226 (was 206) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:46419 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46419 from jenkins.hfs.6 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) 
connection to localhost/127.0.0.1:46419 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46419 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46419 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46419 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=171 (was 230), ProcessCount=11 (was 11), AvailableMemoryMB=5449 (was 4842) - AvailableMemoryMB LEAK? 
- 2024-12-09T14:30:12,418 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=226, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=171, ProcessCount=11, AvailableMemoryMB=5449 2024-12-09T14:30:12,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T14:30:12,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.log.dir so I do NOT create it in target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d 2024-12-09T14:30:12,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2dee360d-c677-381b-3f91-88d25019c4ab/hadoop.tmp.dir so I do NOT create it in target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d 2024-12-09T14:30:12,418 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7, deleteOnExit=true 2024-12-09T14:30:12,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T14:30:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/test.cache.data in system properties and HBase conf 2024-12-09T14:30:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T14:30:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.log.dir in system properties and HBase conf 2024-12-09T14:30:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T14:30:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T14:30:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T14:30:12,419 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T14:30:12,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/nfs.dump.dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/java.io.tmpdir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T14:30:12,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T14:30:12,440 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T14:30:12,514 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:30:12,517 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:30:12,527 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:30:12,527 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:30:12,527 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:30:12,529 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T14:30:12,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64c76f50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.log.dir/,AVAILABLE}
2024-12-09T14:30:12,530 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30964c14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T14:30:12,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35722c03{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/java.io.tmpdir/jetty-localhost-40191-hadoop-hdfs-3_4_1-tests_jar-_-any-12152604932004470172/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T14:30:12,676 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d175de5{HTTP/1.1, (http/1.1)}{localhost:40191}
2024-12-09T14:30:12,676 INFO [Time-limited test {}] server.Server(415): Started @320534ms
2024-12-09T14:30:12,690 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T14:30:12,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T14:30:12,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T14:30:12,775 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:30:12,778 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:30:12,779 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:30:12,779 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:30:12,779 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T14:30:12,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22748d48{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:30:12,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@677f535e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:30:12,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8035060{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/java.io.tmpdir/jetty-localhost-46171-hadoop-hdfs-3_4_1-tests_jar-_-any-15096419992280787906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:30:12,906 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a4098d6{HTTP/1.1, (http/1.1)}{localhost:46171} 2024-12-09T14:30:12,906 INFO [Time-limited test {}] server.Server(415): Started @320764ms 2024-12-09T14:30:12,907 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T14:30:12,938 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T14:30:12,941 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T14:30:12,942 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T14:30:12,942 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T14:30:12,942 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T14:30:12,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20339ff2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.log.dir/,AVAILABLE} 2024-12-09T14:30:12,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@231297c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T14:30:13,011 WARN [Thread-2459 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data1/current/BP-1003568373-172.17.0.3-1733754612446/current, will proceed with Du for space computation calculation, 2024-12-09T14:30:13,011 WARN [Thread-2460 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data2/current/BP-1003568373-172.17.0.3-1733754612446/current, will proceed with Du for space computation calculation, 2024-12-09T14:30:13,052 WARN [Thread-2438 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T14:30:13,056 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f0a2a1f92eb7ada with lease ID 0x985943f0f9bb5dd9: Processing first storage report for DS-940bfc97-976d-4802-8bc6-9c0fc2636e9f from datanode DatanodeRegistration(127.0.0.1:36731, datanodeUuid=b920a460-cab3-4116-b7cc-48035232ea49, infoPort=35075, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446) 2024-12-09T14:30:13,056 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f0a2a1f92eb7ada with lease ID 0x985943f0f9bb5dd9: from storage DS-940bfc97-976d-4802-8bc6-9c0fc2636e9f node DatanodeRegistration(127.0.0.1:36731, datanodeUuid=b920a460-cab3-4116-b7cc-48035232ea49, infoPort=35075, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:30:13,056 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f0a2a1f92eb7ada with lease ID 0x985943f0f9bb5dd9: Processing first storage report for DS-ebf5c99d-aa0b-4bfe-bda3-33eea93cea1b from datanode DatanodeRegistration(127.0.0.1:36731, datanodeUuid=b920a460-cab3-4116-b7cc-48035232ea49, infoPort=35075, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446) 2024-12-09T14:30:13,056 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f0a2a1f92eb7ada with lease ID 0x985943f0f9bb5dd9: from storage DS-ebf5c99d-aa0b-4bfe-bda3-33eea93cea1b node DatanodeRegistration(127.0.0.1:36731, datanodeUuid=b920a460-cab3-4116-b7cc-48035232ea49, infoPort=35075, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T14:30:13,080 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@372003a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/java.io.tmpdir/jetty-localhost-34955-hadoop-hdfs-3_4_1-tests_jar-_-any-11905000441607298397/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T14:30:13,080 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60fdf071{HTTP/1.1, (http/1.1)}{localhost:34955} 2024-12-09T14:30:13,080 INFO [Time-limited test {}] server.Server(415): Started @320938ms 2024-12-09T14:30:13,082 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
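[editor's note] The DirectoryScanner warning above reports an out-of-range dfs.datanode.directoryscan.throttle.limit.ms.per.sec and falls back to -1. The following is a minimal, hypothetical sketch of that kind of validation, written against the Hadoop Configuration API only; the class and method names here (ThrottleConfigSketch, readThrottleLimit) are illustrative and not part of HDFS.

// Hedged sketch: clamp an impossible per-second millisecond budget (>1000) back
// to the "disabled" default of -1, mirroring the warning in the log above.
import org.apache.hadoop.conf.Configuration;

public class ThrottleConfigSketch {
    static final String THROTTLE_KEY = "dfs.datanode.directoryscan.throttle.limit.ms.per.sec";
    static final int DISABLED = -1;

    static int readThrottleLimit(Configuration conf) {
        int limit = conf.getInt(THROTTLE_KEY, DISABLED);
        if (limit > 1000) {
            // A budget of more than 1000 ms per second cannot be honored,
            // so assume the default value (throttling disabled).
            System.out.println(THROTTLE_KEY + " set to value above 1000 ms/sec. Assuming default value of " + DISABLED);
            return DISABLED;
        }
        return limit;
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt(THROTTLE_KEY, 5000); // deliberately out of range
        System.out.println("effective limit = " + readThrottleLimit(conf));
    }
}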
2024-12-09T14:30:13,193 WARN [Thread-2485 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data3/current/BP-1003568373-172.17.0.3-1733754612446/current, will proceed with Du for space computation calculation, 2024-12-09T14:30:13,193 WARN [Thread-2486 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data4/current/BP-1003568373-172.17.0.3-1733754612446/current, will proceed with Du for space computation calculation, 2024-12-09T14:30:13,210 WARN [Thread-2474 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T14:30:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9a2f4db5020404e with lease ID 0x985943f0f9bb5dda: Processing first storage report for DS-67e2c791-53bf-43ec-b352-34d184030fda from datanode DatanodeRegistration(127.0.0.1:43947, datanodeUuid=2e2c5644-7ecf-43f3-bbe3-6335aff5849e, infoPort=42543, infoSecurePort=0, ipcPort=37243, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446) 2024-12-09T14:30:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9a2f4db5020404e with lease ID 0x985943f0f9bb5dda: from storage DS-67e2c791-53bf-43ec-b352-34d184030fda node DatanodeRegistration(127.0.0.1:43947, datanodeUuid=2e2c5644-7ecf-43f3-bbe3-6335aff5849e, infoPort=42543, infoSecurePort=0, ipcPort=37243, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:30:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9a2f4db5020404e with lease ID 0x985943f0f9bb5dda: Processing first storage report for DS-90a40636-39d3-4d48-9078-0bc83aa13db2 from datanode DatanodeRegistration(127.0.0.1:43947, datanodeUuid=2e2c5644-7ecf-43f3-bbe3-6335aff5849e, infoPort=42543, infoSecurePort=0, ipcPort=37243, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446) 2024-12-09T14:30:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9a2f4db5020404e with lease ID 0x985943f0f9bb5dda: from storage DS-90a40636-39d3-4d48-9078-0bc83aa13db2 node DatanodeRegistration(127.0.0.1:43947, datanodeUuid=2e2c5644-7ecf-43f3-bbe3-6335aff5849e, infoPort=42543, infoSecurePort=0, ipcPort=37243, storageInfo=lv=-57;cid=testClusterID;nsid=1554310345;c=1733754612446), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T14:30:13,216 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d 2024-12-09T14:30:13,219 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/zookeeper_0, clientPort=60611, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T14:30:13,219 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60611 2024-12-09T14:30:13,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:13,221 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:13,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:30:13,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741825_1001 (size=7) 2024-12-09T14:30:13,230 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223 with version=8 2024-12-09T14:30:13,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44241/user/jenkins/test-data/4bf6857a-953b-8251-2787-e3e19a93e35d/hbase-staging 2024-12-09T14:30:13,232 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:30:13,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:30:13,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:30:13,233 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:30:13,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:30:13,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:30:13,233 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T14:30:13,233 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:30:13,233 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42915 2024-12-09T14:30:13,234 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42915 connecting to ZooKeeper ensemble=127.0.0.1:60611 2024-12-09T14:30:13,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429150x0, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:30:13,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42915-0x1012b977e020000 connected 2024-12-09T14:30:13,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:13,259 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:13,260 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:30:13,261 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223, hbase.cluster.distributed=false 2024-12-09T14:30:13,262 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:30:13,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42915 2024-12-09T14:30:13,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42915 2024-12-09T14:30:13,266 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42915 2024-12-09T14:30:13,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42915 2024-12-09T14:30:13,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42915 2024-12-09T14:30:13,286 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f4e784dc7cb5:0 server-side Connection retries=45 2024-12-09T14:30:13,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:30:13,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T14:30:13,286 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T14:30:13,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T14:30:13,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T14:30:13,286 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T14:30:13,286 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T14:30:13,287 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33515 2024-12-09T14:30:13,288 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33515 connecting to ZooKeeper ensemble=127.0.0.1:60611 2024-12-09T14:30:13,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:13,290 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:13,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:335150x0, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T14:30:13,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33515-0x1012b977e020001 connected 2024-12-09T14:30:13,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:30:13,295 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T14:30:13,296 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T14:30:13,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T14:30:13,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T14:30:13,298 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33515 2024-12-09T14:30:13,298 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33515 2024-12-09T14:30:13,298 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33515 2024-12-09T14:30:13,299 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33515 2024-12-09T14:30:13,299 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33515 
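[editor's note] The RpcExecutor lines above describe bounded call queues (LinkedBlockingQueue, maxQueueLength=30) drained by a fixed number of handler threads (handlerCount=3). The sketch below is not HBase's RpcExecutor; it is a self-contained JDK-only illustration of that queue/handler layout, with the Runnable standing in for an RPC call.

// Hedged sketch of a bounded FIFO call queue with dedicated handler threads.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class CallQueueSketch {
    public static void main(String[] args) {
        int maxQueueLength = 30;
        int handlerCount = 3;
        BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(maxQueueLength);

        // Handler threads block on the queue and run calls as they arrive.
        for (int i = 0; i < handlerCount; i++) {
            Thread handler = new Thread(() -> {
                try {
                    while (true) {
                        callQueue.take().run();
                    }
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                }
            }, "default.FPBQ.Fifo.handler=" + i);
            handler.setDaemon(true);
            handler.start();
        }

        // A producer enqueues calls; offer() fails once maxQueueLength is reached,
        // which is where a real RPC server would reject the call as "queue full".
        boolean accepted = callQueue.offer(() -> System.out.println("served call"));
        System.out.println("accepted=" + accepted);
    }
}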
2024-12-09T14:30:13,311 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f4e784dc7cb5:42915 2024-12-09T14:30:13,311 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:13,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:30:13,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:30:13,313 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:13,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T14:30:13,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,315 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T14:30:13,316 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f4e784dc7cb5,42915,1733754613232 from backup master directory 2024-12-09T14:30:13,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:13,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:30:13,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T14:30:13,317 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
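[editor's note] The ZKUtil lines above repeatedly "Set watcher on znode that does not yet exist". A small sketch of that pattern with the plain ZooKeeper client follows: exists() registers a watch even when the znode is absent, so the caller is notified on NodeCreated. The ensemble address and znode path are taken from the log; error handling and retries are omitted.

// Hedged sketch: watch a znode that may not exist yet.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchAbsentZNodeSketch {
    public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());

        ZooKeeper zk = new ZooKeeper("127.0.0.1:60611", 30000, watcher);

        // exists() returns null for a missing znode but still leaves a watch behind.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println("/hbase/running currently " + (stat == null ? "absent" : "present"));

        Thread.sleep(5_000); // give the watcher a chance to fire in this toy example
        zk.close();
    }
}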
2024-12-09T14:30:13,317 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:13,321 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/hbase.id] with ID: 19d3d302-06ea-48f2-8b87-4295b190cf90 2024-12-09T14:30:13,321 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/.tmp/hbase.id 2024-12-09T14:30:13,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:30:13,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741826_1002 (size=42) 2024-12-09T14:30:13,327 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/.tmp/hbase.id]:[hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/hbase.id] 2024-12-09T14:30:13,336 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:13,336 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T14:30:13,337 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
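[editor's note] The FSUtils lines above write the cluster ID to .tmp/hbase.id and then move it to hbase.id. Below is a hedged sketch of that write-then-rename pattern using the Hadoop FileSystem API, so readers never see a half-written ID file. The NameNode URI and paths are copied from the log; the surrounding mini-cluster setup is assumed to exist.

// Hedged sketch: write a cluster ID to a temporary path, then rename it into place.
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:41491");
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223");
        Path tmpId = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // Write the ID to the temporary location first...
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
            out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // ...then move it to its target location in a single rename.
        if (!fs.rename(tmpId, finalId)) {
            throw new java.io.IOException("rename of " + tmpId + " to " + finalId + " failed");
        }
    }
}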
2024-12-09T14:30:13,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:30:13,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741827_1003 (size=196) 2024-12-09T14:30:13,347 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T14:30:13,347 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T14:30:13,347 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:30:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:30:13,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741828_1004 (size=1189) 2024-12-09T14:30:13,355 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store 2024-12-09T14:30:13,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:30:13,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741829_1005 (size=34) 2024-12-09T14:30:13,364 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:30:13,364 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:30:13,365 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:13,365 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:13,365 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:30:13,365 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:13,365 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
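[editor's note] The MasterRegion lines above print the full 'master:store' descriptor with its info/proc/rs/state families. As a hedged illustration only, a descriptor with a similar shape can be assembled through the public HBase client builders; this sketch reproduces just a subset of the logged attributes and is not the code HBase uses internally for the master local region.

// Hedged sketch: build a table descriptor resembling the logged 'master:store' one.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");
        ColumnFamilyDescriptor rs = ColumnFamilyDescriptorBuilder.of("rs");
        ColumnFamilyDescriptor state = ColumnFamilyDescriptorBuilder.of("state");

        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .setColumnFamily(rs)
            .setColumnFamily(state)
            .build();
    }
}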
2024-12-09T14:30:13,365 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754613364Disabling compacts and flushes for region at 1733754613364Disabling writes for close at 1733754613365 (+1 ms)Writing region close event to WAL at 1733754613365Closed at 1733754613365 2024-12-09T14:30:13,365 WARN [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/.initializing 2024-12-09T14:30:13,365 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/WALs/f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:13,368 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C42915%2C1733754613232, suffix=, logDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/WALs/f4e784dc7cb5,42915,1733754613232, archiveDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/oldWALs, maxLogs=10 2024-12-09T14:30:13,368 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C42915%2C1733754613232.1733754613368 2024-12-09T14:30:13,372 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/WALs/f4e784dc7cb5,42915,1733754613232/f4e784dc7cb5%2C42915%2C1733754613232.1733754613368 2024-12-09T14:30:13,373 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35075:35075),(127.0.0.1/127.0.0.1:42543:42543)] 2024-12-09T14:30:13,373 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:30:13,374 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:30:13,374 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,374 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T14:30:13,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:13,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T14:30:13,381 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:30:13,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T14:30:13,383 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:30:13,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T14:30:13,384 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T14:30:13,385 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,385 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,386 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,387 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,387 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,387 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T14:30:13,388 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T14:30:13,390 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:30:13,390 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835603, jitterRate=0.06252487003803253}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T14:30:13,391 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733754613374Initializing all the Stores at 1733754613374Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754613374Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754613378 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754613378Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754613378Cleaning up temporary data from old regions at 1733754613387 (+9 ms)Region opened successfully at 1733754613391 (+4 ms) 2024-12-09T14:30:13,391 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T14:30:13,394 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c88a0ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:30:13,394 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T14:30:13,395 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T14:30:13,395 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T14:30:13,395 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T14:30:13,395 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T14:30:13,396 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T14:30:13,396 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T14:30:13,397 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T14:30:13,398 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T14:30:13,400 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T14:30:13,400 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T14:30:13,400 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T14:30:13,402 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T14:30:13,402 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T14:30:13,403 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T14:30:13,404 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T14:30:13,405 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T14:30:13,407 DEBUG 
[master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T14:30:13,409 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T14:30:13,410 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T14:30:13,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:30:13,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T14:30:13,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,413 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f4e784dc7cb5,42915,1733754613232, sessionid=0x1012b977e020000, setting cluster-up flag (Was=false) 2024-12-09T14:30:13,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,421 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T14:30:13,422 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:13,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,431 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T14:30:13,432 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:13,433 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T14:30:13,435 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T14:30:13,435 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T14:30:13,435 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T14:30:13,435 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f4e784dc7cb5,42915,1733754613232 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=5, maxPoolSize=5 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f4e784dc7cb5:0, corePoolSize=10, maxPoolSize=10 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:30:13,437 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f4e784dc7cb5:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T14:30:13,438 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:30:13,438 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T14:30:13,439 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733754643439 2024-12-09T14:30:13,439 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T14:30:13,439 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T14:30:13,439 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T14:30:13,439 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T14:30:13,439 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T14:30:13,439 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T14:30:13,440 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
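The cleaner chores initialized above (TimeToLiveLogCleaner, ReplicationLogCleaner, and the LogsCleaner chore with period=600000 ms) are normally driven by a handful of master-side settings. The sketch below is illustrative only: the key names are the standard HBase cleaner keys, and the values simply restate what this run logs rather than being read from the test's actual site configuration.

    // Illustrative sketch: wiring the log-cleaner chores seen above via configuration.
    // Key names are standard HBase cleaner settings; the values merely echo the logged
    // run and are assumptions about this particular test setup.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LogCleanerConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Delegates run by the LogsCleaner chore when scanning the oldWALs directory.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
                + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
        // TTL checked by TimeToLiveLogCleaner before an old WAL may be deleted (ms).
        conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        // Chore period; matches "name=LogsCleaner, period=600000" in the log above.
        conf.setInt("hbase.master.cleaner.interval", 600_000);
        return conf;
      }
    }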
2024-12-09T14:30:13,440 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,440 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T14:30:13,440 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T14:30:13,440 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T14:30:13,440 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T14:30:13,444 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T14:30:13,444 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T14:30:13,444 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754613444,5,FailOnTimeoutGroup] 2024-12-09T14:30:13,446 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754613444,5,FailOnTimeoutGroup] 2024-12-09T14:30:13,446 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,446 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T14:30:13,446 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,447 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:30:13,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741831_1007 (size=1321) 2024-12-09T14:30:13,451 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T14:30:13,451 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223 2024-12-09T14:30:13,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:30:13,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741832_1008 (size=32) 2024-12-09T14:30:13,460 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:30:13,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:30:13,464 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:30:13,464 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:13,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:30:13,466 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:30:13,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:13,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:30:13,468 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:30:13,468 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:13,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:30:13,470 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:30:13,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:13,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:13,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:30:13,471 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740 2024-12-09T14:30:13,471 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740 2024-12-09T14:30:13,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:30:13,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:30:13,472 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T14:30:13,473 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:30:13,476 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T14:30:13,476 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876496, jitterRate=0.1145232617855072}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:30:13,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733754613460Initializing all the Stores at 1733754613461 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754613461Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754613462 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754613462Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754613462Cleaning up temporary data from old regions at 1733754613472 (+10 ms)Region opened successfully at 1733754613476 (+4 ms) 2024-12-09T14:30:13,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:30:13,477 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:30:13,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:30:13,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:30:13,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:30:13,477 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:30:13,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754613477Disabling compacts and flushes for region at 
1733754613477Disabling writes for close at 1733754613477Writing region close event to WAL at 1733754613477Closed at 1733754613477 2024-12-09T14:30:13,478 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:30:13,478 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T14:30:13,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T14:30:13,480 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:30:13,481 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T14:30:13,501 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(746): ClusterId : 19d3d302-06ea-48f2-8b87-4295b190cf90 2024-12-09T14:30:13,501 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T14:30:13,503 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T14:30:13,503 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T14:30:13,505 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T14:30:13,505 DEBUG [RS:0;f4e784dc7cb5:33515 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be5006b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f4e784dc7cb5/172.17.0.3:0 2024-12-09T14:30:13,518 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f4e784dc7cb5:33515 2024-12-09T14:30:13,518 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T14:30:13,518 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T14:30:13,518 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(832): About to register with Master. 
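The AbstractRpcClient line above prints the effective socket settings (connectTO=10000, readTO=20000, writeTO=60000). A minimal sketch of how those knobs are usually expressed follows; the key spellings are the usual hbase.ipc.client socket-timeout settings and are assumptions here, and the values only restate the logged defaults rather than anything read from this test's hbase-site.xml.

    // Illustrative only: socket timeouts reported by AbstractRpcClient above.
    // Key names are assumed (standard hbase.ipc.client settings); values restate
    // the logged connectTO/readTO/writeTO.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcClientTimeoutsSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10_000); // connectTO
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20_000);    // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60_000);   // writeTO
        return conf;
      }
    }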
2024-12-09T14:30:13,519 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(2659): reportForDuty to master=f4e784dc7cb5,42915,1733754613232 with port=33515, startcode=1733754613286 2024-12-09T14:30:13,519 DEBUG [RS:0;f4e784dc7cb5:33515 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T14:30:13,522 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45795, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T14:30:13,522 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42915 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:13,522 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42915 {}] master.ServerManager(517): Registering regionserver=f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:13,524 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223 2024-12-09T14:30:13,524 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41491 2024-12-09T14:30:13,524 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T14:30:13,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:30:13,526 DEBUG [RS:0;f4e784dc7cb5:33515 {}] zookeeper.ZKUtil(111): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:13,526 WARN [RS:0;f4e784dc7cb5:33515 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T14:30:13,527 INFO [RS:0;f4e784dc7cb5:33515 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:30:13,527 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:13,528 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f4e784dc7cb5,33515,1733754613286] 2024-12-09T14:30:13,533 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T14:30:13,534 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T14:30:13,535 INFO [RS:0;f4e784dc7cb5:33515 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T14:30:13,535 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
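The memstore and compaction-throughput figures above (globalMemStoreLimit=880 M, throughput bounds 100/50 MB/second, tuning period 60000 ms) come from a few regionserver settings. The sketch below names the configuration keys these components usually read; key names and values are assumptions offered for illustration, and the global memstore limit is normally expressed as a heap fraction, not the absolute 880 M the log prints.

    // Illustrative sketch of the settings behind the MemStoreFlusher and
    // PressureAwareCompactionThroughputController lines above. Key names and
    // values are assumptions for illustration, not this test's actual config.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerThroughputSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap usable by all memstores (the logged 880 M is this
        // fraction applied to the test JVM's heap).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Bounds used by the pressure-aware compaction throughput controller
        // ("higher bound: 100.00 MB/second, lower bound 50.00 MB/second").
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Tuning period of the CompactionThroughputTuner chore ("tuning period: 60000 ms").
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
        return conf;
      }
    }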
2024-12-09T14:30:13,535 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T14:30:13,536 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T14:30:13,536 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=2, maxPoolSize=2 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,536 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,537 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,537 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,537 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,537 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f4e784dc7cb5:0, corePoolSize=1, maxPoolSize=1 2024-12-09T14:30:13,537 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:30:13,537 DEBUG [RS:0;f4e784dc7cb5:33515 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f4e784dc7cb5:0, corePoolSize=3, maxPoolSize=3 2024-12-09T14:30:13,537 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
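Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" line above corresponds to a bounded, named thread pool inside the regionserver. The snippet below is a generic JDK sketch of that pattern (fixed core size, idle core threads allowed to time out), not HBase's own executor.ExecutorService implementation.

    // Generic illustration of the pattern behind the "Starting executor service"
    // lines above: a named, bounded pool whose idle core threads may exit.
    // Plain java.util.concurrent; not HBase's internal executor code.
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class NamedPoolSketch {
      public static ThreadPoolExecutor newPool(String name, int corePoolSize, int maxPoolSize) {
        AtomicInteger seq = new AtomicInteger();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            corePoolSize, maxPoolSize,
            60L, TimeUnit.SECONDS,                 // keep-alive for idle threads
            new LinkedBlockingQueue<>(),           // unbounded work queue
            r -> new Thread(r, name + "-" + seq.incrementAndGet()));
        pool.allowCoreThreadTimeOut(true);         // let idle core threads exit
        return pool;
      }

      public static void main(String[] args) {
        // e.g. mirrors "name=RS_OPEN_REGION-regionserver/...:0, corePoolSize=1, maxPoolSize=1"
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
        openRegion.shutdown();
      }
    }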
2024-12-09T14:30:13,537 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,537 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,537 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,537 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,537 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,33515,1733754613286-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:30:13,557 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T14:30:13,557 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,33515,1733754613286-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,557 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,557 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.Replication(171): f4e784dc7cb5,33515,1733754613286 started 2024-12-09T14:30:13,576 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:13,576 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1482): Serving as f4e784dc7cb5,33515,1733754613286, RpcServer on f4e784dc7cb5/172.17.0.3:33515, sessionid=0x1012b977e020001 2024-12-09T14:30:13,576 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T14:30:13,576 DEBUG [RS:0;f4e784dc7cb5:33515 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:13,576 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,33515,1733754613286' 2024-12-09T14:30:13,576 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T14:30:13,577 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T14:30:13,577 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T14:30:13,577 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T14:30:13,577 DEBUG [RS:0;f4e784dc7cb5:33515 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:13,577 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f4e784dc7cb5,33515,1733754613286' 2024-12-09T14:30:13,577 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T14:30:13,578 DEBUG 
[RS:0;f4e784dc7cb5:33515 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T14:30:13,578 DEBUG [RS:0;f4e784dc7cb5:33515 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T14:30:13,578 INFO [RS:0;f4e784dc7cb5:33515 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T14:30:13,578 INFO [RS:0;f4e784dc7cb5:33515 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T14:30:13,631 WARN [f4e784dc7cb5:42915 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T14:30:13,680 INFO [RS:0;f4e784dc7cb5:33515 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C33515%2C1733754613286, suffix=, logDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/f4e784dc7cb5,33515,1733754613286, archiveDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/oldWALs, maxLogs=32 2024-12-09T14:30:13,681 INFO [RS:0;f4e784dc7cb5:33515 {}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C33515%2C1733754613286.1733754613681 2024-12-09T14:30:13,690 INFO [RS:0;f4e784dc7cb5:33515 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/f4e784dc7cb5,33515,1733754613286/f4e784dc7cb5%2C33515%2C1733754613286.1733754613681 2024-12-09T14:30:13,693 DEBUG [RS:0;f4e784dc7cb5:33515 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35075:35075),(127.0.0.1/127.0.0.1:42543:42543)] 2024-12-09T14:30:13,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T14:30:13,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:30:13,881 DEBUG [f4e784dc7cb5:42915 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T14:30:13,882 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:13,883 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,33515,1733754613286, state=OPENING 2024-12-09T14:30:13,884 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T14:30:13,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:13,887 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T14:30:13,887 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:30:13,887 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:30:13,887 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,33515,1733754613286}] 2024-12-09T14:30:14,040 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T14:30:14,042 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59433, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T14:30:14,047 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T14:30:14,047 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:30:14,049 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f4e784dc7cb5%2C33515%2C1733754613286.meta, suffix=.meta, logDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/f4e784dc7cb5,33515,1733754613286, archiveDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/oldWALs, maxLogs=32 2024-12-09T14:30:14,049 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor f4e784dc7cb5%2C33515%2C1733754613286.meta.1733754614049.meta 2024-12-09T14:30:14,063 INFO 
[RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/f4e784dc7cb5,33515,1733754613286/f4e784dc7cb5%2C33515%2C1733754613286.meta.1733754614049.meta 2024-12-09T14:30:14,067 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42543:42543),(127.0.0.1/127.0.0.1:35075:35075)] 2024-12-09T14:30:14,069 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T14:30:14,070 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T14:30:14,070 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T14:30:14,070 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T14:30:14,070 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T14:30:14,070 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T14:30:14,070 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T14:30:14,070 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T14:30:14,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T14:30:14,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T14:30:14,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:14,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:14,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T14:30:14,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T14:30:14,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:14,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:14,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T14:30:14,079 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T14:30:14,079 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:14,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:14,079 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T14:30:14,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T14:30:14,081 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T14:30:14,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T14:30:14,081 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T14:30:14,082 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740 2024-12-09T14:30:14,083 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740 2024-12-09T14:30:14,085 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T14:30:14,085 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T14:30:14,085 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
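The column-family attributes printed while hbase:meta is opened above (ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory, 8 KB blocks, 3 versions) map directly onto the public descriptor builders. The sketch below rebuilds one such family on a hypothetical table named "demo"; it is illustrative only and is not how the master constructs the meta descriptor internally.

    // Illustrative sketch: expressing the family attributes logged for hbase:meta
    // ("BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1',
    //  IN_MEMORY => 'true', BLOCKSIZE => '8192', VERSIONS => '3'") with the
    // client descriptor builders, applied here to a hypothetical table "demo".
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo")) // hypothetical table name
            .setColumnFamily(info)
            .build();
      }
    }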
2024-12-09T14:30:14,087 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T14:30:14,088 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874783, jitterRate=0.11234448850154877}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T14:30:14,088 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T14:30:14,089 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733754614070Writing region info on filesystem at 1733754614071 (+1 ms)Initializing all the Stores at 1733754614072 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754614072Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754614074 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733754614074Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733754614074Cleaning up temporary data from old regions at 1733754614085 (+11 ms)Running coprocessor post-open hooks at 1733754614088 (+3 ms)Region opened successfully at 1733754614089 (+1 ms) 2024-12-09T14:30:14,090 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733754614040 2024-12-09T14:30:14,093 DEBUG [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T14:30:14,093 INFO [RS_OPEN_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T14:30:14,094 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:14,095 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f4e784dc7cb5,33515,1733754613286, state=OPEN 2024-12-09T14:30:14,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:30:14,102 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:14,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T14:30:14,102 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:30:14,102 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T14:30:14,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T14:30:14,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f4e784dc7cb5,33515,1733754613286 in 215 msec 2024-12-09T14:30:14,107 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T14:30:14,107 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 627 msec 2024-12-09T14:30:14,108 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T14:30:14,108 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T14:30:14,109 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:30:14,109 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,33515,1733754613286, seqNum=-1] 2024-12-09T14:30:14,109 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:30:14,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35461, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:30:14,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 681 msec 2024-12-09T14:30:14,115 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733754614115, completionTime=-1 2024-12-09T14:30:14,115 INFO 
[master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T14:30:14,115 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T14:30:14,117 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T14:30:14,117 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733754674117 2024-12-09T14:30:14,117 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733754734117 2024-12-09T14:30:14,117 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T14:30:14,117 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,42915,1733754613232-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:14,118 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,42915,1733754613232-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:14,118 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,42915,1733754613232-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:14,118 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f4e784dc7cb5:42915, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:14,118 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:14,119 DEBUG [master/f4e784dc7cb5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T14:30:14,124 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:14,127 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.810sec 2024-12-09T14:30:14,127 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T14:30:14,127 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T14:30:14,127 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T14:30:14,127 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T14:30:14,127 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T14:30:14,127 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,42915,1733754613232-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T14:30:14,128 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,42915,1733754613232-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T14:30:14,131 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T14:30:14,131 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T14:30:14,131 INFO [master/f4e784dc7cb5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f4e784dc7cb5,42915,1733754613232-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T14:30:14,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb38aaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:30:14,201 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f4e784dc7cb5,42915,-1 for getting cluster id 2024-12-09T14:30:14,201 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T14:30:14,203 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '19d3d302-06ea-48f2-8b87-4295b190cf90' 2024-12-09T14:30:14,203 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T14:30:14,203 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "19d3d302-06ea-48f2-8b87-4295b190cf90" 2024-12-09T14:30:14,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bb83a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:30:14,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f4e784dc7cb5,42915,-1] 2024-12-09T14:30:14,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T14:30:14,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:14,205 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33468, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T14:30:14,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10391316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T14:30:14,206 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T14:30:14,207 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f4e784dc7cb5,33515,1733754613286, seqNum=-1] 2024-12-09T14:30:14,207 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T14:30:14,208 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T14:30:14,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:14,210 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T14:30:14,214 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T14:30:14,214 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T14:30:14,216 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/test.com,8080,1, archiveDir=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/oldWALs, maxLogs=32 2024-12-09T14:30:14,217 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733754614217 2024-12-09T14:30:14,224 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/test.com,8080,1/test.com%2C8080%2C1.1733754614217 2024-12-09T14:30:14,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35075:35075),(127.0.0.1/127.0.0.1:42543:42543)] 2024-12-09T14:30:14,234 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733754614234 2024-12-09T14:30:14,242 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,242 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,242 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,242 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,242 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,242 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/test.com,8080,1/test.com%2C8080%2C1.1733754614217 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/test.com,8080,1/test.com%2C8080%2C1.1733754614234 2024-12-09T14:30:14,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741835_1011 (size=93) 2024-12-09T14:30:14,244 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741835_1011 (size=93) 2024-12-09T14:30:14,245 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/WALs/test.com,8080,1/test.com%2C8080%2C1.1733754614217 to hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/oldWALs/test.com%2C8080%2C1.1733754614217 2024-12-09T14:30:14,246 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35075:35075),(127.0.0.1/127.0.0.1:42543:42543)] 2024-12-09T14:30:14,246 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,246 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,246 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,246 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,247 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741836_1012 (size=93) 2024-12-09T14:30:14,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741836_1012 (size=93) 2024-12-09T14:30:14,251 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/oldWALs 2024-12-09T14:30:14,251 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733754614234) 2024-12-09T14:30:14,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T14:30:14,251 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T14:30:14,251 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:30:14,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:14,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:14,251 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T14:30:14,252 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T14:30:14,252 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1259881703, stopped=false 2024-12-09T14:30:14,252 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f4e784dc7cb5,42915,1733754613232 2024-12-09T14:30:14,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:30:14,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:14,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T14:30:14,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:14,254 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:30:14,254 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T14:30:14,254 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:30:14,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:14,254 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f4e784dc7cb5,33515,1733754613286' ***** 2024-12-09T14:30:14,254 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(959): stopping server f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:14,255 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:30:14,255 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f4e784dc7cb5:33515. 
2024-12-09T14:30:14,255 DEBUG [RS:0;f4e784dc7cb5:33515 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T14:30:14,255 DEBUG [RS:0;f4e784dc7cb5:33515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T14:30:14,255 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T14:30:14,255 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T14:30:14,258 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T14:30:14,258 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T14:30:14,258 DEBUG [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T14:30:14,258 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T14:30:14,259 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T14:30:14,259 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T14:30:14,259 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T14:30:14,259 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T14:30:14,259 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T14:30:14,284 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740/.tmp/ns/20633ccea67e4ec6a3d1d11357f3f4db is 43, key is default/ns:d/1733754614111/Put/seqid=0 2024-12-09T14:30:14,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741837_1013 (size=5153) 2024-12-09T14:30:14,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741837_1013 (size=5153) 2024-12-09T14:30:14,290 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740/.tmp/ns/20633ccea67e4ec6a3d1d11357f3f4db 2024-12-09T14:30:14,296 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740/.tmp/ns/20633ccea67e4ec6a3d1d11357f3f4db as hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740/ns/20633ccea67e4ec6a3d1d11357f3f4db 2024-12-09T14:30:14,314 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740/ns/20633ccea67e4ec6a3d1d11357f3f4db, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T14:30:14,316 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 57ms, sequenceid=6, compaction requested=false 2024-12-09T14:30:14,316 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T14:30:14,339 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T14:30:14,339 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T14:30:14,340 INFO [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T14:30:14,340 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733754614258Running coprocessor pre-close hooks at 1733754614258Disabling compacts and flushes for region at 1733754614258Disabling writes for close at 1733754614259 (+1 ms)Obtaining lock to block concurrent updates at 1733754614259Preparing flush snapshotting stores in 1588230740 at 1733754614259Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733754614259Flushing stores of hbase:meta,,1.1588230740 at 1733754614260 (+1 ms)Flushing 1588230740/ns: creating writer at 1733754614260Flushing 1588230740/ns: appending metadata at 1733754614284 (+24 ms)Flushing 1588230740/ns: closing flushed file at 1733754614284Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@192ebbac: reopening flushed file at 1733754614295 (+11 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 57ms, sequenceid=6, compaction requested=false at 1733754614316 (+21 ms)Writing region close event to WAL at 1733754614333 (+17 ms)Running coprocessor post-close hooks at 1733754614339 (+6 ms)Closed at 1733754614340 (+1 ms) 2024-12-09T14:30:14,340 DEBUG [RS_CLOSE_META-regionserver/f4e784dc7cb5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T14:30:14,459 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(976): stopping server f4e784dc7cb5,33515,1733754613286; all regions closed. 
2024-12-09T14:30:14,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,460 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,460 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741834_1010 (size=1152) 2024-12-09T14:30:14,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741834_1010 (size=1152) 2024-12-09T14:30:14,469 DEBUG [RS:0;f4e784dc7cb5:33515 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/oldWALs 2024-12-09T14:30:14,469 INFO [RS:0;f4e784dc7cb5:33515 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C33515%2C1733754613286.meta:.meta(num 1733754614049) 2024-12-09T14:30:14,470 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,470 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,470 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,470 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T14:30:14,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741833_1009 (size=93) 2024-12-09T14:30:14,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741833_1009 (size=93) 2024-12-09T14:30:14,475 DEBUG [RS:0;f4e784dc7cb5:33515 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/oldWALs 2024-12-09T14:30:14,475 INFO [RS:0;f4e784dc7cb5:33515 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog f4e784dc7cb5%2C33515%2C1733754613286:(num 1733754613681) 2024-12-09T14:30:14,475 DEBUG [RS:0;f4e784dc7cb5:33515 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T14:30:14,475 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T14:30:14,475 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:30:14,475 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.ChoreService(370): Chore service for: regionserver/f4e784dc7cb5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T14:30:14,475 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:30:14,476 INFO [regionserver/f4e784dc7cb5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T14:30:14,476 INFO [RS:0;f4e784dc7cb5:33515 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33515 2024-12-09T14:30:14,478 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T14:30:14,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T14:30:14,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f4e784dc7cb5,33515,1733754613286 2024-12-09T14:30:14,480 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f4e784dc7cb5,33515,1733754613286] 2024-12-09T14:30:14,482 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f4e784dc7cb5,33515,1733754613286 already deleted, retry=false 2024-12-09T14:30:14,482 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f4e784dc7cb5,33515,1733754613286 expired; onlineServers=0 2024-12-09T14:30:14,482 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f4e784dc7cb5,42915,1733754613232' ***** 2024-12-09T14:30:14,482 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T14:30:14,482 INFO [M:0;f4e784dc7cb5:42915 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T14:30:14,482 INFO [M:0;f4e784dc7cb5:42915 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T14:30:14,482 DEBUG [M:0;f4e784dc7cb5:42915 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T14:30:14,482 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T14:30:14,483 DEBUG [M:0;f4e784dc7cb5:42915 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T14:30:14,483 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754613444 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.large.0-1733754613444,5,FailOnTimeoutGroup] 2024-12-09T14:30:14,483 DEBUG [master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754613444 {}] cleaner.HFileCleaner(306): Exit Thread[master/f4e784dc7cb5:0:becomeActiveMaster-HFileCleaner.small.0-1733754613444,5,FailOnTimeoutGroup] 2024-12-09T14:30:14,483 INFO [M:0;f4e784dc7cb5:42915 {}] hbase.ChoreService(370): Chore service for: master/f4e784dc7cb5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T14:30:14,483 INFO [M:0;f4e784dc7cb5:42915 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T14:30:14,483 DEBUG [M:0;f4e784dc7cb5:42915 {}] master.HMaster(1795): Stopping service threads 2024-12-09T14:30:14,483 INFO [M:0;f4e784dc7cb5:42915 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T14:30:14,483 INFO [M:0;f4e784dc7cb5:42915 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T14:30:14,483 INFO [M:0;f4e784dc7cb5:42915 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T14:30:14,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T14:30:14,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T14:30:14,485 DEBUG [M:0;f4e784dc7cb5:42915 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-09T14:30:14,485 DEBUG [M:0;f4e784dc7cb5:42915 {}] master.ActiveMasterManager(353): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-09T14:30:14,486 INFO [M:0;f4e784dc7cb5:42915 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/.lastflushedseqids 2024-12-09T14:30:14,486 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-09T14:30:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741838_1014 (size=99) 2024-12-09T14:30:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741838_1014 (size=99) 2024-12-09T14:30:14,494 INFO [M:0;f4e784dc7cb5:42915 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T14:30:14,495 INFO [M:0;f4e784dc7cb5:42915 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T14:30:14,495 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T14:30:14,495 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:14,495 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:14,495 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T14:30:14,495 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T14:30:14,495 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T14:30:14,521 DEBUG [M:0;f4e784dc7cb5:42915 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cd55afce58448aa8aa5e9f3ad9f9d3f is 82, key is hbase:meta,,1/info:regioninfo/1733754614094/Put/seqid=0 2024-12-09T14:30:14,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741839_1015 (size=5672) 2024-12-09T14:30:14,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741839_1015 (size=5672) 2024-12-09T14:30:14,526 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cd55afce58448aa8aa5e9f3ad9f9d3f 2024-12-09T14:30:14,548 DEBUG [M:0;f4e784dc7cb5:42915 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ad486f88cd6f4cfaaaa1c7ab25f147f2 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733754614114/Put/seqid=0 2024-12-09T14:30:14,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741840_1016 (size=5275) 2024-12-09T14:30:14,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741840_1016 (size=5275) 
2024-12-09T14:30:14,580 INFO [RS:0;f4e784dc7cb5:33515 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T14:30:14,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:30:14,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33515-0x1012b977e020001, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T14:30:14,580 INFO [RS:0;f4e784dc7cb5:33515 {}] regionserver.HRegionServer(1031): Exiting; stopping=f4e784dc7cb5,33515,1733754613286; zookeeper connection closed. 2024-12-09T14:30:14,580 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ab18945 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ab18945 2024-12-09T14:30:14,581 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T14:30:14,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41215,1733754398471/f4e784dc7cb5%2C41215%2C1733754398471.1733754398662 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T14:30:14,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43639/user/jenkins/test-data/631c5383-72c0-7c10-9a71-820b56f461bd/WALs/f4e784dc7cb5,41507,1733754397420/f4e784dc7cb5%2C41507%2C1733754397420.meta.1733754398281.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T14:30:14,956 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ad486f88cd6f4cfaaaa1c7ab25f147f2
2024-12-09T14:30:14,982 DEBUG [M:0;f4e784dc7cb5:42915 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/21908137906e45688a6b4dbd5f83da30 is 69, key is f4e784dc7cb5,33515,1733754613286/rs:state/1733754613523/Put/seqid=0
2024-12-09T14:30:14,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741841_1017 (size=5156)
2024-12-09T14:30:14,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741841_1017 (size=5156)
2024-12-09T14:30:14,988 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/21908137906e45688a6b4dbd5f83da30
2024-12-09T14:30:15,020 DEBUG [M:0;f4e784dc7cb5:42915 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0d75988cd12341dc8f79fef668c02be0 is 52, key is load_balancer_on/state:d/1733754614212/Put/seqid=0
2024-12-09T14:30:15,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741842_1018 (size=5056)
2024-12-09T14:30:15,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741842_1018 (size=5056)
2024-12-09T14:30:15,027 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0d75988cd12341dc8f79fef668c02be0
2024-12-09T14:30:15,035 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cd55afce58448aa8aa5e9f3ad9f9d3f as hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5cd55afce58448aa8aa5e9f3ad9f9d3f
2024-12-09T14:30:15,040 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5cd55afce58448aa8aa5e9f3ad9f9d3f, entries=8, sequenceid=29, filesize=5.5 K
2024-12-09T14:30:15,041 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ad486f88cd6f4cfaaaa1c7ab25f147f2 as hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ad486f88cd6f4cfaaaa1c7ab25f147f2
2024-12-09T14:30:15,045 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ad486f88cd6f4cfaaaa1c7ab25f147f2, entries=3, sequenceid=29, filesize=5.2 K
2024-12-09T14:30:15,046 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/21908137906e45688a6b4dbd5f83da30 as hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/21908137906e45688a6b4dbd5f83da30
2024-12-09T14:30:15,050 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/21908137906e45688a6b4dbd5f83da30, entries=1, sequenceid=29, filesize=5.0 K
2024-12-09T14:30:15,051 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0d75988cd12341dc8f79fef668c02be0 as hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0d75988cd12341dc8f79fef668c02be0
2024-12-09T14:30:15,055 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41491/user/jenkins/test-data/8254bb29-d3e3-08a2-9ffd-f644f25da223/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0d75988cd12341dc8f79fef668c02be0, entries=1, sequenceid=29, filesize=4.9 K
2024-12-09T14:30:15,056 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 561ms, sequenceid=29, compaction requested=false
2024-12-09T14:30:15,062 INFO [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T14:30:15,062 DEBUG [M:0;f4e784dc7cb5:42915 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733754614495Disabling compacts and flushes for region at 1733754614495Disabling writes for close at 1733754614495Obtaining lock to block concurrent updates at 1733754614495Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733754614495Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733754614496 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733754614499 (+3 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733754614499Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733754614520 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733754614520Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733754614532 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733754614548 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733754614548Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733754614961 (+413 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733754614981 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733754614981Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733754614995 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733754615019 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733754615019Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67f6465c: reopening flushed file at 1733754615034 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49728b80: reopening flushed file at 1733754615040 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76c57126: reopening flushed file at 1733754615045 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19332d0b: reopening flushed file at 1733754615050 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 561ms, sequenceid=29, compaction requested=false at 1733754615056 (+6 ms)Writing region close event to WAL at 1733754615062 (+6 ms)Closed at 1733754615062
2024-12-09T14:30:15,063 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:30:15,063 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:30:15,063 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:30:15,063 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:30:15,063 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T14:30:15,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43947 is added to blk_1073741830_1006 (size=10311)
2024-12-09T14:30:15,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36731 is added to blk_1073741830_1006 (size=10311)
2024-12-09T14:30:15,066 INFO [M:0;f4e784dc7cb5:42915 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T14:30:15,066 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
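The flush records above follow a write-to-temporary-then-commit pattern: each store writes its flushed file under ".tmp/" and only afterwards "commits" it into the live column-family directory before reporting it as added. The sketch below illustrates that pattern with the standard Hadoop FileSystem API; it is not HBase's flush code, and the paths and payload are hypothetical placeholders (only the namenode address is taken from the log above).

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:41491"); // namenode address as seen in the log; adjust for your cluster
    FileSystem fs = FileSystem.get(conf);

    // 1. Write the flushed data to a temporary location first.
    Path tmp = new Path("/demo/store/.tmp/flush-0001");            // hypothetical path
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("flushed cells".getBytes(StandardCharsets.UTF_8)); // placeholder payload
    }

    // 2. Commit by renaming the finished file into the live store directory,
    //    so readers never observe a partially written file.
    Path committed = new Path("/demo/store/cf/flush-0001");        // hypothetical path
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmp, committed)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + committed);
    }
    System.out.println("Added " + committed + ", filesize=" + fs.getFileStatus(committed).getLen());
  }
}

Committing via a rename of a fully written file is what makes the separate "Committing ... as ..." and "Added ..." records above appear for each store.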
2024-12-09T14:30:15,066 INFO [M:0;f4e784dc7cb5:42915 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42915
2024-12-09T14:30:15,066 INFO [M:0;f4e784dc7cb5:42915 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T14:30:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T14:30:15,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42915-0x1012b977e020000, quorum=127.0.0.1:60611, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T14:30:15,168 INFO [M:0;f4e784dc7cb5:42915 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T14:30:15,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@372003a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T14:30:15,175 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60fdf071{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T14:30:15,175 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T14:30:15,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@231297c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T14:30:15,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20339ff2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.log.dir/,STOPPED}
2024-12-09T14:30:15,179 WARN [BP-1003568373-172.17.0.3-1733754612446 heartbeating to localhost/127.0.0.1:41491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T14:30:15,179 WARN [BP-1003568373-172.17.0.3-1733754612446 heartbeating to localhost/127.0.0.1:41491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1003568373-172.17.0.3-1733754612446 (Datanode Uuid 2e2c5644-7ecf-43f3-bbe3-6335aff5849e) service to localhost/127.0.0.1:41491
2024-12-09T14:30:15,179 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T14:30:15,179 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T14:30:15,180 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data3/current/BP-1003568373-172.17.0.3-1733754612446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T14:30:15,180 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data4/current/BP-1003568373-172.17.0.3-1733754612446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T14:30:15,180 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T14:30:15,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8035060{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T14:30:15,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a4098d6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T14:30:15,187 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T14:30:15,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@677f535e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T14:30:15,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22748d48{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.log.dir/,STOPPED}
2024-12-09T14:30:15,192 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T14:30:15,192 WARN [BP-1003568373-172.17.0.3-1733754612446 heartbeating to localhost/127.0.0.1:41491 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T14:30:15,192 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T14:30:15,192 WARN [BP-1003568373-172.17.0.3-1733754612446 heartbeating to localhost/127.0.0.1:41491 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1003568373-172.17.0.3-1733754612446 (Datanode Uuid b920a460-cab3-4116-b7cc-48035232ea49) service to localhost/127.0.0.1:41491
2024-12-09T14:30:15,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data1/current/BP-1003568373-172.17.0.3-1733754612446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T14:30:15,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/cluster_1cfe4a1d-8f80-6d48-42bf-f3105291b6a7/data/data2/current/BP-1003568373-172.17.0.3-1733754612446 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T14:30:15,193 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T14:30:15,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35722c03{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T14:30:15,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d175de5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T14:30:15,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T14:30:15,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30964c14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T14:30:15,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64c76f50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33e4b910-7384-8a00-3d5a-fa0d034a737d/hadoop.log.dir/,STOPPED}
2024-12-09T14:30:15,210 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T14:30:15,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T14:30:15,237 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 226)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41491
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41491
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41491 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:41491
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41491
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41491 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41491 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:41491
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-20
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=171 (was 171), ProcessCount=11 (was 11), AvailableMemoryMB=6130 (was 5449) - AvailableMemoryMB LEAK? -
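The "Potentially hanging thread" entries in the resource summary above are a snapshot of live threads and their stacks taken after the test finished, used to compare before/after counts. A minimal sketch of producing a comparable dump with plain JDK APIs is shown below; it is an illustration only, not the HBase ResourceChecker implementation, and the class name is hypothetical.

import java.util.Map;

public class ThreadDumpSketch {
  public static void main(String[] args) {
    // Snapshot every live thread and its current stack trace.
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    for (Map.Entry<Thread, StackTraceElement[]> entry : stacks.entrySet()) {
      Thread t = entry.getKey();
      if (t == Thread.currentThread()) {
        continue; // skip the thread doing the reporting
      }
      System.out.println("Potentially hanging thread: " + t.getName());
      for (StackTraceElement frame : entry.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}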